Commit 74e42361fa3bc102647ad1e1ec7c21b747658843
Committed by
Greg Kroah-Hartman
1 parent
b82eaa1b48
Exists in
smarc-ti-linux-3.14.y
and in
1 other branch
btrfs: set proper message level for skinny metadata
commit 5efa0490cc94aee06cd8d282683e22a8ce0a0026 upstream. This has been confusing people for too long, the message is really just informative. Signed-off-by: David Sterba <dsterba@suse.cz> Signed-off-by: Chris Mason <clm@fb.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 1 changed file with 1 addition and 1 deletion Inline Diff
fs/btrfs/disk-io.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Oracle. All rights reserved. | 2 | * Copyright (C) 2007 Oracle. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public | 5 | * modify it under the terms of the GNU General Public |
6 | * License v2 as published by the Free Software Foundation. | 6 | * License v2 as published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public | 13 | * You should have received a copy of the GNU General Public |
14 | * License along with this program; if not, write to the | 14 | * License along with this program; if not, write to the |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/fs.h> | 19 | #include <linux/fs.h> |
20 | #include <linux/blkdev.h> | 20 | #include <linux/blkdev.h> |
21 | #include <linux/scatterlist.h> | 21 | #include <linux/scatterlist.h> |
22 | #include <linux/swap.h> | 22 | #include <linux/swap.h> |
23 | #include <linux/radix-tree.h> | 23 | #include <linux/radix-tree.h> |
24 | #include <linux/writeback.h> | 24 | #include <linux/writeback.h> |
25 | #include <linux/buffer_head.h> | 25 | #include <linux/buffer_head.h> |
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | #include <linux/kthread.h> | 27 | #include <linux/kthread.h> |
28 | #include <linux/freezer.h> | 28 | #include <linux/freezer.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/migrate.h> | 30 | #include <linux/migrate.h> |
31 | #include <linux/ratelimit.h> | 31 | #include <linux/ratelimit.h> |
32 | #include <linux/uuid.h> | 32 | #include <linux/uuid.h> |
33 | #include <linux/semaphore.h> | 33 | #include <linux/semaphore.h> |
34 | #include <asm/unaligned.h> | 34 | #include <asm/unaligned.h> |
35 | #include "ctree.h" | 35 | #include "ctree.h" |
36 | #include "disk-io.h" | 36 | #include "disk-io.h" |
37 | #include "hash.h" | 37 | #include "hash.h" |
38 | #include "transaction.h" | 38 | #include "transaction.h" |
39 | #include "btrfs_inode.h" | 39 | #include "btrfs_inode.h" |
40 | #include "volumes.h" | 40 | #include "volumes.h" |
41 | #include "print-tree.h" | 41 | #include "print-tree.h" |
42 | #include "async-thread.h" | 42 | #include "async-thread.h" |
43 | #include "locking.h" | 43 | #include "locking.h" |
44 | #include "tree-log.h" | 44 | #include "tree-log.h" |
45 | #include "free-space-cache.h" | 45 | #include "free-space-cache.h" |
46 | #include "inode-map.h" | 46 | #include "inode-map.h" |
47 | #include "check-integrity.h" | 47 | #include "check-integrity.h" |
48 | #include "rcu-string.h" | 48 | #include "rcu-string.h" |
49 | #include "dev-replace.h" | 49 | #include "dev-replace.h" |
50 | #include "raid56.h" | 50 | #include "raid56.h" |
51 | #include "sysfs.h" | 51 | #include "sysfs.h" |
52 | 52 | ||
53 | #ifdef CONFIG_X86 | 53 | #ifdef CONFIG_X86 |
54 | #include <asm/cpufeature.h> | 54 | #include <asm/cpufeature.h> |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | static struct extent_io_ops btree_extent_io_ops; | 57 | static struct extent_io_ops btree_extent_io_ops; |
58 | static void end_workqueue_fn(struct btrfs_work *work); | 58 | static void end_workqueue_fn(struct btrfs_work *work); |
59 | static void free_fs_root(struct btrfs_root *root); | 59 | static void free_fs_root(struct btrfs_root *root); |
60 | static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | 60 | static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, |
61 | int read_only); | 61 | int read_only); |
62 | static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, | 62 | static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, |
63 | struct btrfs_root *root); | 63 | struct btrfs_root *root); |
64 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root); | 64 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root); |
65 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | 65 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
66 | struct btrfs_root *root); | 66 | struct btrfs_root *root); |
67 | static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root); | 67 | static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root); |
68 | static int btrfs_destroy_marked_extents(struct btrfs_root *root, | 68 | static int btrfs_destroy_marked_extents(struct btrfs_root *root, |
69 | struct extent_io_tree *dirty_pages, | 69 | struct extent_io_tree *dirty_pages, |
70 | int mark); | 70 | int mark); |
71 | static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | 71 | static int btrfs_destroy_pinned_extent(struct btrfs_root *root, |
72 | struct extent_io_tree *pinned_extents); | 72 | struct extent_io_tree *pinned_extents); |
73 | static int btrfs_cleanup_transaction(struct btrfs_root *root); | 73 | static int btrfs_cleanup_transaction(struct btrfs_root *root); |
74 | static void btrfs_error_commit_super(struct btrfs_root *root); | 74 | static void btrfs_error_commit_super(struct btrfs_root *root); |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * end_io_wq structs are used to do processing in task context when an IO is | 77 | * end_io_wq structs are used to do processing in task context when an IO is |
78 | * complete. This is used during reads to verify checksums, and it is used | 78 | * complete. This is used during reads to verify checksums, and it is used |
79 | * by writes to insert metadata for new file extents after IO is complete. | 79 | * by writes to insert metadata for new file extents after IO is complete. |
80 | */ | 80 | */ |
81 | struct end_io_wq { | 81 | struct end_io_wq { |
82 | struct bio *bio; | 82 | struct bio *bio; |
83 | bio_end_io_t *end_io; | 83 | bio_end_io_t *end_io; |
84 | void *private; | 84 | void *private; |
85 | struct btrfs_fs_info *info; | 85 | struct btrfs_fs_info *info; |
86 | int error; | 86 | int error; |
87 | int metadata; | 87 | int metadata; |
88 | struct list_head list; | 88 | struct list_head list; |
89 | struct btrfs_work work; | 89 | struct btrfs_work work; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | /* | 92 | /* |
93 | * async submit bios are used to offload expensive checksumming | 93 | * async submit bios are used to offload expensive checksumming |
94 | * onto the worker threads. They checksum file and metadata bios | 94 | * onto the worker threads. They checksum file and metadata bios |
95 | * just before they are sent down the IO stack. | 95 | * just before they are sent down the IO stack. |
96 | */ | 96 | */ |
97 | struct async_submit_bio { | 97 | struct async_submit_bio { |
98 | struct inode *inode; | 98 | struct inode *inode; |
99 | struct bio *bio; | 99 | struct bio *bio; |
100 | struct list_head list; | 100 | struct list_head list; |
101 | extent_submit_bio_hook_t *submit_bio_start; | 101 | extent_submit_bio_hook_t *submit_bio_start; |
102 | extent_submit_bio_hook_t *submit_bio_done; | 102 | extent_submit_bio_hook_t *submit_bio_done; |
103 | int rw; | 103 | int rw; |
104 | int mirror_num; | 104 | int mirror_num; |
105 | unsigned long bio_flags; | 105 | unsigned long bio_flags; |
106 | /* | 106 | /* |
107 | * bio_offset is optional, can be used if the pages in the bio | 107 | * bio_offset is optional, can be used if the pages in the bio |
108 | * can't tell us where in the file the bio should go | 108 | * can't tell us where in the file the bio should go |
109 | */ | 109 | */ |
110 | u64 bio_offset; | 110 | u64 bio_offset; |
111 | struct btrfs_work work; | 111 | struct btrfs_work work; |
112 | int error; | 112 | int error; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * Lockdep class keys for extent_buffer->lock's in this root. For a given | 116 | * Lockdep class keys for extent_buffer->lock's in this root. For a given |
117 | * eb, the lockdep key is determined by the btrfs_root it belongs to and | 117 | * eb, the lockdep key is determined by the btrfs_root it belongs to and |
118 | * the level the eb occupies in the tree. | 118 | * the level the eb occupies in the tree. |
119 | * | 119 | * |
120 | * Different roots are used for different purposes and may nest inside each | 120 | * Different roots are used for different purposes and may nest inside each |
121 | * other and they require separate keysets. As lockdep keys should be | 121 | * other and they require separate keysets. As lockdep keys should be |
122 | * static, assign keysets according to the purpose of the root as indicated | 122 | * static, assign keysets according to the purpose of the root as indicated |
123 | * by btrfs_root->objectid. This ensures that all special purpose roots | 123 | * by btrfs_root->objectid. This ensures that all special purpose roots |
124 | * have separate keysets. | 124 | * have separate keysets. |
125 | * | 125 | * |
126 | * Lock-nesting across peer nodes is always done with the immediate parent | 126 | * Lock-nesting across peer nodes is always done with the immediate parent |
127 | * node locked thus preventing deadlock. As lockdep doesn't know this, use | 127 | * node locked thus preventing deadlock. As lockdep doesn't know this, use |
128 | * subclass to avoid triggering lockdep warning in such cases. | 128 | * subclass to avoid triggering lockdep warning in such cases. |
129 | * | 129 | * |
130 | * The key is set by the readpage_end_io_hook after the buffer has passed | 130 | * The key is set by the readpage_end_io_hook after the buffer has passed |
131 | * csum validation but before the pages are unlocked. It is also set by | 131 | * csum validation but before the pages are unlocked. It is also set by |
132 | * btrfs_init_new_buffer on freshly allocated blocks. | 132 | * btrfs_init_new_buffer on freshly allocated blocks. |
133 | * | 133 | * |
134 | * We also add a check to make sure the highest level of the tree is the | 134 | * We also add a check to make sure the highest level of the tree is the |
135 | * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code | 135 | * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code |
136 | * needs update as well. | 136 | * needs update as well. |
137 | */ | 137 | */ |
138 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 138 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
139 | # if BTRFS_MAX_LEVEL != 8 | 139 | # if BTRFS_MAX_LEVEL != 8 |
140 | # error | 140 | # error |
141 | # endif | 141 | # endif |
142 | 142 | ||
143 | static struct btrfs_lockdep_keyset { | 143 | static struct btrfs_lockdep_keyset { |
144 | u64 id; /* root objectid */ | 144 | u64 id; /* root objectid */ |
145 | const char *name_stem; /* lock name stem */ | 145 | const char *name_stem; /* lock name stem */ |
146 | char names[BTRFS_MAX_LEVEL + 1][20]; | 146 | char names[BTRFS_MAX_LEVEL + 1][20]; |
147 | struct lock_class_key keys[BTRFS_MAX_LEVEL + 1]; | 147 | struct lock_class_key keys[BTRFS_MAX_LEVEL + 1]; |
148 | } btrfs_lockdep_keysets[] = { | 148 | } btrfs_lockdep_keysets[] = { |
149 | { .id = BTRFS_ROOT_TREE_OBJECTID, .name_stem = "root" }, | 149 | { .id = BTRFS_ROOT_TREE_OBJECTID, .name_stem = "root" }, |
150 | { .id = BTRFS_EXTENT_TREE_OBJECTID, .name_stem = "extent" }, | 150 | { .id = BTRFS_EXTENT_TREE_OBJECTID, .name_stem = "extent" }, |
151 | { .id = BTRFS_CHUNK_TREE_OBJECTID, .name_stem = "chunk" }, | 151 | { .id = BTRFS_CHUNK_TREE_OBJECTID, .name_stem = "chunk" }, |
152 | { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, | 152 | { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, |
153 | { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, | 153 | { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, |
154 | { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, | 154 | { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, |
155 | { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" }, | 155 | { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" }, |
156 | { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, | 156 | { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, |
157 | { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, | 157 | { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, |
158 | { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, | 158 | { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, |
159 | { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" }, | 159 | { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" }, |
160 | { .id = 0, .name_stem = "tree" }, | 160 | { .id = 0, .name_stem = "tree" }, |
161 | }; | 161 | }; |
162 | 162 | ||
163 | void __init btrfs_init_lockdep(void) | 163 | void __init btrfs_init_lockdep(void) |
164 | { | 164 | { |
165 | int i, j; | 165 | int i, j; |
166 | 166 | ||
167 | /* initialize lockdep class names */ | 167 | /* initialize lockdep class names */ |
168 | for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) { | 168 | for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) { |
169 | struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i]; | 169 | struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i]; |
170 | 170 | ||
171 | for (j = 0; j < ARRAY_SIZE(ks->names); j++) | 171 | for (j = 0; j < ARRAY_SIZE(ks->names); j++) |
172 | snprintf(ks->names[j], sizeof(ks->names[j]), | 172 | snprintf(ks->names[j], sizeof(ks->names[j]), |
173 | "btrfs-%s-%02d", ks->name_stem, j); | 173 | "btrfs-%s-%02d", ks->name_stem, j); |
174 | } | 174 | } |
175 | } | 175 | } |
176 | 176 | ||
177 | void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, | 177 | void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, |
178 | int level) | 178 | int level) |
179 | { | 179 | { |
180 | struct btrfs_lockdep_keyset *ks; | 180 | struct btrfs_lockdep_keyset *ks; |
181 | 181 | ||
182 | BUG_ON(level >= ARRAY_SIZE(ks->keys)); | 182 | BUG_ON(level >= ARRAY_SIZE(ks->keys)); |
183 | 183 | ||
184 | /* find the matching keyset, id 0 is the default entry */ | 184 | /* find the matching keyset, id 0 is the default entry */ |
185 | for (ks = btrfs_lockdep_keysets; ks->id; ks++) | 185 | for (ks = btrfs_lockdep_keysets; ks->id; ks++) |
186 | if (ks->id == objectid) | 186 | if (ks->id == objectid) |
187 | break; | 187 | break; |
188 | 188 | ||
189 | lockdep_set_class_and_name(&eb->lock, | 189 | lockdep_set_class_and_name(&eb->lock, |
190 | &ks->keys[level], ks->names[level]); | 190 | &ks->keys[level], ks->names[level]); |
191 | } | 191 | } |
192 | 192 | ||
193 | #endif | 193 | #endif |
194 | 194 | ||
195 | /* | 195 | /* |
196 | * extents on the btree inode are pretty simple, there's one extent | 196 | * extents on the btree inode are pretty simple, there's one extent |
197 | * that covers the entire device | 197 | * that covers the entire device |
198 | */ | 198 | */ |
199 | static struct extent_map *btree_get_extent(struct inode *inode, | 199 | static struct extent_map *btree_get_extent(struct inode *inode, |
200 | struct page *page, size_t pg_offset, u64 start, u64 len, | 200 | struct page *page, size_t pg_offset, u64 start, u64 len, |
201 | int create) | 201 | int create) |
202 | { | 202 | { |
203 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 203 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
204 | struct extent_map *em; | 204 | struct extent_map *em; |
205 | int ret; | 205 | int ret; |
206 | 206 | ||
207 | read_lock(&em_tree->lock); | 207 | read_lock(&em_tree->lock); |
208 | em = lookup_extent_mapping(em_tree, start, len); | 208 | em = lookup_extent_mapping(em_tree, start, len); |
209 | if (em) { | 209 | if (em) { |
210 | em->bdev = | 210 | em->bdev = |
211 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; | 211 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; |
212 | read_unlock(&em_tree->lock); | 212 | read_unlock(&em_tree->lock); |
213 | goto out; | 213 | goto out; |
214 | } | 214 | } |
215 | read_unlock(&em_tree->lock); | 215 | read_unlock(&em_tree->lock); |
216 | 216 | ||
217 | em = alloc_extent_map(); | 217 | em = alloc_extent_map(); |
218 | if (!em) { | 218 | if (!em) { |
219 | em = ERR_PTR(-ENOMEM); | 219 | em = ERR_PTR(-ENOMEM); |
220 | goto out; | 220 | goto out; |
221 | } | 221 | } |
222 | em->start = 0; | 222 | em->start = 0; |
223 | em->len = (u64)-1; | 223 | em->len = (u64)-1; |
224 | em->block_len = (u64)-1; | 224 | em->block_len = (u64)-1; |
225 | em->block_start = 0; | 225 | em->block_start = 0; |
226 | em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; | 226 | em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; |
227 | 227 | ||
228 | write_lock(&em_tree->lock); | 228 | write_lock(&em_tree->lock); |
229 | ret = add_extent_mapping(em_tree, em, 0); | 229 | ret = add_extent_mapping(em_tree, em, 0); |
230 | if (ret == -EEXIST) { | 230 | if (ret == -EEXIST) { |
231 | free_extent_map(em); | 231 | free_extent_map(em); |
232 | em = lookup_extent_mapping(em_tree, start, len); | 232 | em = lookup_extent_mapping(em_tree, start, len); |
233 | if (!em) | 233 | if (!em) |
234 | em = ERR_PTR(-EIO); | 234 | em = ERR_PTR(-EIO); |
235 | } else if (ret) { | 235 | } else if (ret) { |
236 | free_extent_map(em); | 236 | free_extent_map(em); |
237 | em = ERR_PTR(ret); | 237 | em = ERR_PTR(ret); |
238 | } | 238 | } |
239 | write_unlock(&em_tree->lock); | 239 | write_unlock(&em_tree->lock); |
240 | 240 | ||
241 | out: | 241 | out: |
242 | return em; | 242 | return em; |
243 | } | 243 | } |
244 | 244 | ||
245 | u32 btrfs_csum_data(char *data, u32 seed, size_t len) | 245 | u32 btrfs_csum_data(char *data, u32 seed, size_t len) |
246 | { | 246 | { |
247 | return btrfs_crc32c(seed, data, len); | 247 | return btrfs_crc32c(seed, data, len); |
248 | } | 248 | } |
249 | 249 | ||
250 | void btrfs_csum_final(u32 crc, char *result) | 250 | void btrfs_csum_final(u32 crc, char *result) |
251 | { | 251 | { |
252 | put_unaligned_le32(~crc, result); | 252 | put_unaligned_le32(~crc, result); |
253 | } | 253 | } |
254 | 254 | ||
255 | /* | 255 | /* |
256 | * compute the csum for a btree block, and either verify it or write it | 256 | * compute the csum for a btree block, and either verify it or write it |
257 | * into the csum field of the block. | 257 | * into the csum field of the block. |
258 | */ | 258 | */ |
259 | static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, | 259 | static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, |
260 | int verify) | 260 | int verify) |
261 | { | 261 | { |
262 | u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); | 262 | u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); |
263 | char *result = NULL; | 263 | char *result = NULL; |
264 | unsigned long len; | 264 | unsigned long len; |
265 | unsigned long cur_len; | 265 | unsigned long cur_len; |
266 | unsigned long offset = BTRFS_CSUM_SIZE; | 266 | unsigned long offset = BTRFS_CSUM_SIZE; |
267 | char *kaddr; | 267 | char *kaddr; |
268 | unsigned long map_start; | 268 | unsigned long map_start; |
269 | unsigned long map_len; | 269 | unsigned long map_len; |
270 | int err; | 270 | int err; |
271 | u32 crc = ~(u32)0; | 271 | u32 crc = ~(u32)0; |
272 | unsigned long inline_result; | 272 | unsigned long inline_result; |
273 | 273 | ||
274 | len = buf->len - offset; | 274 | len = buf->len - offset; |
275 | while (len > 0) { | 275 | while (len > 0) { |
276 | err = map_private_extent_buffer(buf, offset, 32, | 276 | err = map_private_extent_buffer(buf, offset, 32, |
277 | &kaddr, &map_start, &map_len); | 277 | &kaddr, &map_start, &map_len); |
278 | if (err) | 278 | if (err) |
279 | return 1; | 279 | return 1; |
280 | cur_len = min(len, map_len - (offset - map_start)); | 280 | cur_len = min(len, map_len - (offset - map_start)); |
281 | crc = btrfs_csum_data(kaddr + offset - map_start, | 281 | crc = btrfs_csum_data(kaddr + offset - map_start, |
282 | crc, cur_len); | 282 | crc, cur_len); |
283 | len -= cur_len; | 283 | len -= cur_len; |
284 | offset += cur_len; | 284 | offset += cur_len; |
285 | } | 285 | } |
286 | if (csum_size > sizeof(inline_result)) { | 286 | if (csum_size > sizeof(inline_result)) { |
287 | result = kzalloc(csum_size * sizeof(char), GFP_NOFS); | 287 | result = kzalloc(csum_size * sizeof(char), GFP_NOFS); |
288 | if (!result) | 288 | if (!result) |
289 | return 1; | 289 | return 1; |
290 | } else { | 290 | } else { |
291 | result = (char *)&inline_result; | 291 | result = (char *)&inline_result; |
292 | } | 292 | } |
293 | 293 | ||
294 | btrfs_csum_final(crc, result); | 294 | btrfs_csum_final(crc, result); |
295 | 295 | ||
296 | if (verify) { | 296 | if (verify) { |
297 | if (memcmp_extent_buffer(buf, result, 0, csum_size)) { | 297 | if (memcmp_extent_buffer(buf, result, 0, csum_size)) { |
298 | u32 val; | 298 | u32 val; |
299 | u32 found = 0; | 299 | u32 found = 0; |
300 | memcpy(&found, result, csum_size); | 300 | memcpy(&found, result, csum_size); |
301 | 301 | ||
302 | read_extent_buffer(buf, &val, 0, csum_size); | 302 | read_extent_buffer(buf, &val, 0, csum_size); |
303 | printk_ratelimited(KERN_INFO | 303 | printk_ratelimited(KERN_INFO |
304 | "BTRFS: %s checksum verify failed on %llu wanted %X found %X " | 304 | "BTRFS: %s checksum verify failed on %llu wanted %X found %X " |
305 | "level %d\n", | 305 | "level %d\n", |
306 | root->fs_info->sb->s_id, buf->start, | 306 | root->fs_info->sb->s_id, buf->start, |
307 | val, found, btrfs_header_level(buf)); | 307 | val, found, btrfs_header_level(buf)); |
308 | if (result != (char *)&inline_result) | 308 | if (result != (char *)&inline_result) |
309 | kfree(result); | 309 | kfree(result); |
310 | return 1; | 310 | return 1; |
311 | } | 311 | } |
312 | } else { | 312 | } else { |
313 | write_extent_buffer(buf, result, 0, csum_size); | 313 | write_extent_buffer(buf, result, 0, csum_size); |
314 | } | 314 | } |
315 | if (result != (char *)&inline_result) | 315 | if (result != (char *)&inline_result) |
316 | kfree(result); | 316 | kfree(result); |
317 | return 0; | 317 | return 0; |
318 | } | 318 | } |
319 | 319 | ||
320 | /* | 320 | /* |
321 | * we can't consider a given block up to date unless the transid of the | 321 | * we can't consider a given block up to date unless the transid of the |
322 | * block matches the transid in the parent node's pointer. This is how we | 322 | * block matches the transid in the parent node's pointer. This is how we |
323 | * detect blocks that either didn't get written at all or got written | 323 | * detect blocks that either didn't get written at all or got written |
324 | * in the wrong place. | 324 | * in the wrong place. |
325 | */ | 325 | */ |
326 | static int verify_parent_transid(struct extent_io_tree *io_tree, | 326 | static int verify_parent_transid(struct extent_io_tree *io_tree, |
327 | struct extent_buffer *eb, u64 parent_transid, | 327 | struct extent_buffer *eb, u64 parent_transid, |
328 | int atomic) | 328 | int atomic) |
329 | { | 329 | { |
330 | struct extent_state *cached_state = NULL; | 330 | struct extent_state *cached_state = NULL; |
331 | int ret; | 331 | int ret; |
332 | 332 | ||
333 | if (!parent_transid || btrfs_header_generation(eb) == parent_transid) | 333 | if (!parent_transid || btrfs_header_generation(eb) == parent_transid) |
334 | return 0; | 334 | return 0; |
335 | 335 | ||
336 | if (atomic) | 336 | if (atomic) |
337 | return -EAGAIN; | 337 | return -EAGAIN; |
338 | 338 | ||
339 | lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, | 339 | lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, |
340 | 0, &cached_state); | 340 | 0, &cached_state); |
341 | if (extent_buffer_uptodate(eb) && | 341 | if (extent_buffer_uptodate(eb) && |
342 | btrfs_header_generation(eb) == parent_transid) { | 342 | btrfs_header_generation(eb) == parent_transid) { |
343 | ret = 0; | 343 | ret = 0; |
344 | goto out; | 344 | goto out; |
345 | } | 345 | } |
346 | printk_ratelimited("parent transid verify failed on %llu wanted %llu " | 346 | printk_ratelimited("parent transid verify failed on %llu wanted %llu " |
347 | "found %llu\n", | 347 | "found %llu\n", |
348 | eb->start, parent_transid, btrfs_header_generation(eb)); | 348 | eb->start, parent_transid, btrfs_header_generation(eb)); |
349 | ret = 1; | 349 | ret = 1; |
350 | clear_extent_buffer_uptodate(eb); | 350 | clear_extent_buffer_uptodate(eb); |
351 | out: | 351 | out: |
352 | unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, | 352 | unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, |
353 | &cached_state, GFP_NOFS); | 353 | &cached_state, GFP_NOFS); |
354 | return ret; | 354 | return ret; |
355 | } | 355 | } |
356 | 356 | ||
357 | /* | 357 | /* |
358 | * Return 0 if the superblock checksum type matches the checksum value of that | 358 | * Return 0 if the superblock checksum type matches the checksum value of that |
359 | * algorithm. Pass the raw disk superblock data. | 359 | * algorithm. Pass the raw disk superblock data. |
360 | */ | 360 | */ |
361 | static int btrfs_check_super_csum(char *raw_disk_sb) | 361 | static int btrfs_check_super_csum(char *raw_disk_sb) |
362 | { | 362 | { |
363 | struct btrfs_super_block *disk_sb = | 363 | struct btrfs_super_block *disk_sb = |
364 | (struct btrfs_super_block *)raw_disk_sb; | 364 | (struct btrfs_super_block *)raw_disk_sb; |
365 | u16 csum_type = btrfs_super_csum_type(disk_sb); | 365 | u16 csum_type = btrfs_super_csum_type(disk_sb); |
366 | int ret = 0; | 366 | int ret = 0; |
367 | 367 | ||
368 | if (csum_type == BTRFS_CSUM_TYPE_CRC32) { | 368 | if (csum_type == BTRFS_CSUM_TYPE_CRC32) { |
369 | u32 crc = ~(u32)0; | 369 | u32 crc = ~(u32)0; |
370 | const int csum_size = sizeof(crc); | 370 | const int csum_size = sizeof(crc); |
371 | char result[csum_size]; | 371 | char result[csum_size]; |
372 | 372 | ||
373 | /* | 373 | /* |
374 | * The super_block structure does not span the whole | 374 | * The super_block structure does not span the whole |
375 | * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space | 375 | * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space |
376 | * is filled with zeros and is included in the checkum. | 376 | * is filled with zeros and is included in the checkum. |
377 | */ | 377 | */ |
378 | crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, | 378 | crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, |
379 | crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); | 379 | crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); |
380 | btrfs_csum_final(crc, result); | 380 | btrfs_csum_final(crc, result); |
381 | 381 | ||
382 | if (memcmp(raw_disk_sb, result, csum_size)) | 382 | if (memcmp(raw_disk_sb, result, csum_size)) |
383 | ret = 1; | 383 | ret = 1; |
384 | 384 | ||
385 | if (ret && btrfs_super_generation(disk_sb) < 10) { | 385 | if (ret && btrfs_super_generation(disk_sb) < 10) { |
386 | printk(KERN_WARNING | 386 | printk(KERN_WARNING |
387 | "BTRFS: super block crcs don't match, older mkfs detected\n"); | 387 | "BTRFS: super block crcs don't match, older mkfs detected\n"); |
388 | ret = 0; | 388 | ret = 0; |
389 | } | 389 | } |
390 | } | 390 | } |
391 | 391 | ||
392 | if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) { | 392 | if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) { |
393 | printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n", | 393 | printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n", |
394 | csum_type); | 394 | csum_type); |
395 | ret = 1; | 395 | ret = 1; |
396 | } | 396 | } |
397 | 397 | ||
398 | return ret; | 398 | return ret; |
399 | } | 399 | } |
400 | 400 | ||
401 | /* | 401 | /* |
402 | * helper to read a given tree block, doing retries as required when | 402 | * helper to read a given tree block, doing retries as required when |
403 | * the checksums don't match and we have alternate mirrors to try. | 403 | * the checksums don't match and we have alternate mirrors to try. |
404 | */ | 404 | */ |
405 | static int btree_read_extent_buffer_pages(struct btrfs_root *root, | 405 | static int btree_read_extent_buffer_pages(struct btrfs_root *root, |
406 | struct extent_buffer *eb, | 406 | struct extent_buffer *eb, |
407 | u64 start, u64 parent_transid) | 407 | u64 start, u64 parent_transid) |
408 | { | 408 | { |
409 | struct extent_io_tree *io_tree; | 409 | struct extent_io_tree *io_tree; |
410 | int failed = 0; | 410 | int failed = 0; |
411 | int ret; | 411 | int ret; |
412 | int num_copies = 0; | 412 | int num_copies = 0; |
413 | int mirror_num = 0; | 413 | int mirror_num = 0; |
414 | int failed_mirror = 0; | 414 | int failed_mirror = 0; |
415 | 415 | ||
416 | clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); | 416 | clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); |
417 | io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; | 417 | io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; |
418 | while (1) { | 418 | while (1) { |
419 | ret = read_extent_buffer_pages(io_tree, eb, start, | 419 | ret = read_extent_buffer_pages(io_tree, eb, start, |
420 | WAIT_COMPLETE, | 420 | WAIT_COMPLETE, |
421 | btree_get_extent, mirror_num); | 421 | btree_get_extent, mirror_num); |
422 | if (!ret) { | 422 | if (!ret) { |
423 | if (!verify_parent_transid(io_tree, eb, | 423 | if (!verify_parent_transid(io_tree, eb, |
424 | parent_transid, 0)) | 424 | parent_transid, 0)) |
425 | break; | 425 | break; |
426 | else | 426 | else |
427 | ret = -EIO; | 427 | ret = -EIO; |
428 | } | 428 | } |
429 | 429 | ||
430 | /* | 430 | /* |
431 | * This buffer's crc is fine, but its contents are corrupted, so | 431 | * This buffer's crc is fine, but its contents are corrupted, so |
432 | * there is no reason to read the other copies, they won't be | 432 | * there is no reason to read the other copies, they won't be |
433 | * any less wrong. | 433 | * any less wrong. |
434 | */ | 434 | */ |
435 | if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) | 435 | if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) |
436 | break; | 436 | break; |
437 | 437 | ||
438 | num_copies = btrfs_num_copies(root->fs_info, | 438 | num_copies = btrfs_num_copies(root->fs_info, |
439 | eb->start, eb->len); | 439 | eb->start, eb->len); |
440 | if (num_copies == 1) | 440 | if (num_copies == 1) |
441 | break; | 441 | break; |
442 | 442 | ||
443 | if (!failed_mirror) { | 443 | if (!failed_mirror) { |
444 | failed = 1; | 444 | failed = 1; |
445 | failed_mirror = eb->read_mirror; | 445 | failed_mirror = eb->read_mirror; |
446 | } | 446 | } |
447 | 447 | ||
448 | mirror_num++; | 448 | mirror_num++; |
449 | if (mirror_num == failed_mirror) | 449 | if (mirror_num == failed_mirror) |
450 | mirror_num++; | 450 | mirror_num++; |
451 | 451 | ||
452 | if (mirror_num > num_copies) | 452 | if (mirror_num > num_copies) |
453 | break; | 453 | break; |
454 | } | 454 | } |
455 | 455 | ||
456 | if (failed && !ret && failed_mirror) | 456 | if (failed && !ret && failed_mirror) |
457 | repair_eb_io_failure(root, eb, failed_mirror); | 457 | repair_eb_io_failure(root, eb, failed_mirror); |
458 | 458 | ||
459 | return ret; | 459 | return ret; |
460 | } | 460 | } |
461 | 461 | ||
462 | /* | 462 | /* |
463 | * checksum a dirty tree block before IO. This has extra checks to make sure | 463 | * checksum a dirty tree block before IO. This has extra checks to make sure |
464 | * we only fill in the checksum field in the first page of a multi-page block | 464 | * we only fill in the checksum field in the first page of a multi-page block |
465 | */ | 465 | */ |
466 | 466 | ||
467 | static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) | 467 | static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) |
468 | { | 468 | { |
469 | u64 start = page_offset(page); | 469 | u64 start = page_offset(page); |
470 | u64 found_start; | 470 | u64 found_start; |
471 | struct extent_buffer *eb; | 471 | struct extent_buffer *eb; |
472 | 472 | ||
473 | eb = (struct extent_buffer *)page->private; | 473 | eb = (struct extent_buffer *)page->private; |
474 | if (page != eb->pages[0]) | 474 | if (page != eb->pages[0]) |
475 | return 0; | 475 | return 0; |
476 | found_start = btrfs_header_bytenr(eb); | 476 | found_start = btrfs_header_bytenr(eb); |
477 | if (WARN_ON(found_start != start || !PageUptodate(page))) | 477 | if (WARN_ON(found_start != start || !PageUptodate(page))) |
478 | return 0; | 478 | return 0; |
479 | csum_tree_block(root, eb, 0); | 479 | csum_tree_block(root, eb, 0); |
480 | return 0; | 480 | return 0; |
481 | } | 481 | } |
482 | 482 | ||
483 | static int check_tree_block_fsid(struct btrfs_root *root, | 483 | static int check_tree_block_fsid(struct btrfs_root *root, |
484 | struct extent_buffer *eb) | 484 | struct extent_buffer *eb) |
485 | { | 485 | { |
486 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | 486 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; |
487 | u8 fsid[BTRFS_UUID_SIZE]; | 487 | u8 fsid[BTRFS_UUID_SIZE]; |
488 | int ret = 1; | 488 | int ret = 1; |
489 | 489 | ||
490 | read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); | 490 | read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE); |
491 | while (fs_devices) { | 491 | while (fs_devices) { |
492 | if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) { | 492 | if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) { |
493 | ret = 0; | 493 | ret = 0; |
494 | break; | 494 | break; |
495 | } | 495 | } |
496 | fs_devices = fs_devices->seed; | 496 | fs_devices = fs_devices->seed; |
497 | } | 497 | } |
498 | return ret; | 498 | return ret; |
499 | } | 499 | } |
500 | 500 | ||
/*
 * Report a corrupt leaf at crit level: logs the textual reason together
 * with the block bytenr, the owning root's objectid and the bad slot.
 * NOTE(review): the two string halves concatenate without a space, so the
 * output reads "block=N,root=M" -- confirm before touching the format.
 */
#define CORRUPT(reason, eb, root, slot)				\
	btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu,"	\
		   "root=%llu, slot=%d", reason,			\
		   btrfs_header_bytenr(eb), root->objectid, slot)
505 | 505 | ||
/*
 * Sanity-check a leaf we just read from disk: the item offset/size pairs
 * must exactly tile the leaf data area (which grows from the end of the
 * leaf towards the front) and the keys must be strictly ascending.
 *
 * Returns 0 when the leaf looks consistent, -EIO on the first violation
 * (after logging it via CORRUPT()).
 */
static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	/* an empty leaf has nothing to validate */
	if (nritems == 0)
		return 0;

	/* Check the 0 item: its data must end exactly at the leaf boundary */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each items keys are in the correct order and their
	 * offsets make sense.  We only have to loop through nritems-1 because
	 * we check the current slot against the next slot, which verifies the
	 * next slot's offset+size makes sense and that the current's slot
	 * offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just incase all the items are consistent to eachother, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
566 | 566 | ||
567 | static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, | 567 | static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, |
568 | u64 phy_offset, struct page *page, | 568 | u64 phy_offset, struct page *page, |
569 | u64 start, u64 end, int mirror) | 569 | u64 start, u64 end, int mirror) |
570 | { | 570 | { |
571 | u64 found_start; | 571 | u64 found_start; |
572 | int found_level; | 572 | int found_level; |
573 | struct extent_buffer *eb; | 573 | struct extent_buffer *eb; |
574 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; | 574 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; |
575 | int ret = 0; | 575 | int ret = 0; |
576 | int reads_done; | 576 | int reads_done; |
577 | 577 | ||
578 | if (!page->private) | 578 | if (!page->private) |
579 | goto out; | 579 | goto out; |
580 | 580 | ||
581 | eb = (struct extent_buffer *)page->private; | 581 | eb = (struct extent_buffer *)page->private; |
582 | 582 | ||
583 | /* the pending IO might have been the only thing that kept this buffer | 583 | /* the pending IO might have been the only thing that kept this buffer |
584 | * in memory. Make sure we have a ref for all this other checks | 584 | * in memory. Make sure we have a ref for all this other checks |
585 | */ | 585 | */ |
586 | extent_buffer_get(eb); | 586 | extent_buffer_get(eb); |
587 | 587 | ||
588 | reads_done = atomic_dec_and_test(&eb->io_pages); | 588 | reads_done = atomic_dec_and_test(&eb->io_pages); |
589 | if (!reads_done) | 589 | if (!reads_done) |
590 | goto err; | 590 | goto err; |
591 | 591 | ||
592 | eb->read_mirror = mirror; | 592 | eb->read_mirror = mirror; |
593 | if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) { | 593 | if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) { |
594 | ret = -EIO; | 594 | ret = -EIO; |
595 | goto err; | 595 | goto err; |
596 | } | 596 | } |
597 | 597 | ||
598 | found_start = btrfs_header_bytenr(eb); | 598 | found_start = btrfs_header_bytenr(eb); |
599 | if (found_start != eb->start) { | 599 | if (found_start != eb->start) { |
600 | printk_ratelimited(KERN_INFO "BTRFS: bad tree block start " | 600 | printk_ratelimited(KERN_INFO "BTRFS: bad tree block start " |
601 | "%llu %llu\n", | 601 | "%llu %llu\n", |
602 | found_start, eb->start); | 602 | found_start, eb->start); |
603 | ret = -EIO; | 603 | ret = -EIO; |
604 | goto err; | 604 | goto err; |
605 | } | 605 | } |
606 | if (check_tree_block_fsid(root, eb)) { | 606 | if (check_tree_block_fsid(root, eb)) { |
607 | printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n", | 607 | printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n", |
608 | eb->start); | 608 | eb->start); |
609 | ret = -EIO; | 609 | ret = -EIO; |
610 | goto err; | 610 | goto err; |
611 | } | 611 | } |
612 | found_level = btrfs_header_level(eb); | 612 | found_level = btrfs_header_level(eb); |
613 | if (found_level >= BTRFS_MAX_LEVEL) { | 613 | if (found_level >= BTRFS_MAX_LEVEL) { |
614 | btrfs_info(root->fs_info, "bad tree block level %d", | 614 | btrfs_info(root->fs_info, "bad tree block level %d", |
615 | (int)btrfs_header_level(eb)); | 615 | (int)btrfs_header_level(eb)); |
616 | ret = -EIO; | 616 | ret = -EIO; |
617 | goto err; | 617 | goto err; |
618 | } | 618 | } |
619 | 619 | ||
620 | btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), | 620 | btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), |
621 | eb, found_level); | 621 | eb, found_level); |
622 | 622 | ||
623 | ret = csum_tree_block(root, eb, 1); | 623 | ret = csum_tree_block(root, eb, 1); |
624 | if (ret) { | 624 | if (ret) { |
625 | ret = -EIO; | 625 | ret = -EIO; |
626 | goto err; | 626 | goto err; |
627 | } | 627 | } |
628 | 628 | ||
629 | /* | 629 | /* |
630 | * If this is a leaf block and it is corrupt, set the corrupt bit so | 630 | * If this is a leaf block and it is corrupt, set the corrupt bit so |
631 | * that we don't try and read the other copies of this block, just | 631 | * that we don't try and read the other copies of this block, just |
632 | * return -EIO. | 632 | * return -EIO. |
633 | */ | 633 | */ |
634 | if (found_level == 0 && check_leaf(root, eb)) { | 634 | if (found_level == 0 && check_leaf(root, eb)) { |
635 | set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); | 635 | set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); |
636 | ret = -EIO; | 636 | ret = -EIO; |
637 | } | 637 | } |
638 | 638 | ||
639 | if (!ret) | 639 | if (!ret) |
640 | set_extent_buffer_uptodate(eb); | 640 | set_extent_buffer_uptodate(eb); |
641 | err: | 641 | err: |
642 | if (reads_done && | 642 | if (reads_done && |
643 | test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) | 643 | test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) |
644 | btree_readahead_hook(root, eb, eb->start, ret); | 644 | btree_readahead_hook(root, eb, eb->start, ret); |
645 | 645 | ||
646 | if (ret) { | 646 | if (ret) { |
647 | /* | 647 | /* |
648 | * our io error hook is going to dec the io pages | 648 | * our io error hook is going to dec the io pages |
649 | * again, we have to make sure it has something | 649 | * again, we have to make sure it has something |
650 | * to decrement | 650 | * to decrement |
651 | */ | 651 | */ |
652 | atomic_inc(&eb->io_pages); | 652 | atomic_inc(&eb->io_pages); |
653 | clear_extent_buffer_uptodate(eb); | 653 | clear_extent_buffer_uptodate(eb); |
654 | } | 654 | } |
655 | free_extent_buffer(eb); | 655 | free_extent_buffer(eb); |
656 | out: | 656 | out: |
657 | return ret; | 657 | return ret; |
658 | } | 658 | } |
659 | 659 | ||
/*
 * extent_io read-error hook for btree pages: record the IO error and the
 * failed mirror on the buffer, balance the io_pages count, and notify a
 * pending readahead.  Always returns -EIO since nothing is repaired here.
 */
static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	/* balance the increment done when this page's read was submitted */
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
}
673 | 673 | ||
/*
 * bio completion callback installed by btrfs_bio_wq_end_io(): defer the
 * real end_io processing (end_workqueue_fn) to the worker pool matching
 * the IO direction and the metadata class recorded in the wrapper.
 */
static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	/* stash the error; end_workqueue_fn will forward it */
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
			btrfs_queue_worker(&fs_info->endio_freespace_worker,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		/* any other non-zero metadata class is plain tree metadata */
		else if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}
709 | 709 | ||
/*
 * Redirect a bio's completion into a btrfs endio workqueue by wrapping
 * its bi_private/bi_end_io in an end_io_wq (see end_workqueue_bio()).
 *
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 * 3 - raid parity work
 *
 * Returns 0 on success, -ENOMEM if the wrapper can't be allocated.
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	/* GFP_NOFS: we may be on a filesystem writeback path */
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	/* save the caller's completion so end_workqueue_fn can restore it */
	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
737 | 737 | ||
738 | unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) | 738 | unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) |
739 | { | 739 | { |
740 | unsigned long limit = min_t(unsigned long, | 740 | unsigned long limit = min_t(unsigned long, |
741 | info->workers.max_workers, | 741 | info->workers.max_workers, |
742 | info->fs_devices->open_devices); | 742 | info->fs_devices->open_devices); |
743 | return 256 * limit; | 743 | return 256 * limit; |
744 | } | 744 | } |
745 | 745 | ||
746 | static void run_one_async_start(struct btrfs_work *work) | 746 | static void run_one_async_start(struct btrfs_work *work) |
747 | { | 747 | { |
748 | struct async_submit_bio *async; | 748 | struct async_submit_bio *async; |
749 | int ret; | 749 | int ret; |
750 | 750 | ||
751 | async = container_of(work, struct async_submit_bio, work); | 751 | async = container_of(work, struct async_submit_bio, work); |
752 | ret = async->submit_bio_start(async->inode, async->rw, async->bio, | 752 | ret = async->submit_bio_start(async->inode, async->rw, async->bio, |
753 | async->mirror_num, async->bio_flags, | 753 | async->mirror_num, async->bio_flags, |
754 | async->bio_offset); | 754 | async->bio_offset); |
755 | if (ret) | 755 | if (ret) |
756 | async->error = ret; | 756 | async->error = ret; |
757 | } | 757 | } |
758 | 758 | ||
/*
 * Ordered phase of an async bio submission: drop our slot in
 * nr_async_submits, wake throttled submitters, then either complete an
 * errored bio or hand it to the submit_bio_done callback.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	/* wake waiters once the queue drops below 2/3 of the submit limit */
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occured we just want to clean up the bio and move on */
	if (async->error) {
		bio_endio(async->bio, async->error);
		return;
	}

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}
785 | 785 | ||
786 | static void run_one_async_free(struct btrfs_work *work) | 786 | static void run_one_async_free(struct btrfs_work *work) |
787 | { | 787 | { |
788 | struct async_submit_bio *async; | 788 | struct async_submit_bio *async; |
789 | 789 | ||
790 | async = container_of(work, struct async_submit_bio, work); | 790 | async = container_of(work, struct async_submit_bio, work); |
791 | kfree(async); | 791 | kfree(async); |
792 | } | 792 | } |
793 | 793 | ||
/*
 * Queue a bio for asynchronous submission via the fs_info->workers pool.
 *
 * submit_bio_start runs first (typically checksumming); submit_bio_done
 * runs afterwards in completion order and performs the actual mapping.
 * The bio is accounted in nr_async_submits, and while a drain has been
 * requested we wait here for the queue to empty.
 *
 * Returns 0 on success, -ENOMEM if the async wrapper can't be allocated.
 */
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	/* ordered_func/ordered_free run in queue order after work.func */
	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->error = 0;

	atomic_inc(&fs_info->nr_async_submits);

	/* sync bios jump ahead of the queue */
	if (rw & REQ_SYNC)
		btrfs_set_work_high_prio(&async->work);

	btrfs_queue_worker(&fs_info->workers, &async->work);

	/* a drain is in progress: don't return until the queue is empty */
	while (atomic_read(&fs_info->async_submit_draining) &&
	      atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}
839 | 839 | ||
840 | static int btree_csum_one_bio(struct bio *bio) | 840 | static int btree_csum_one_bio(struct bio *bio) |
841 | { | 841 | { |
842 | struct bio_vec *bvec; | 842 | struct bio_vec *bvec; |
843 | struct btrfs_root *root; | 843 | struct btrfs_root *root; |
844 | int i, ret = 0; | 844 | int i, ret = 0; |
845 | 845 | ||
846 | bio_for_each_segment_all(bvec, bio, i) { | 846 | bio_for_each_segment_all(bvec, bio, i) { |
847 | root = BTRFS_I(bvec->bv_page->mapping->host)->root; | 847 | root = BTRFS_I(bvec->bv_page->mapping->host)->root; |
848 | ret = csum_dirty_buffer(root, bvec->bv_page); | 848 | ret = csum_dirty_buffer(root, bvec->bv_page); |
849 | if (ret) | 849 | if (ret) |
850 | break; | 850 | break; |
851 | } | 851 | } |
852 | 852 | ||
853 | return ret; | 853 | return ret; |
854 | } | 854 | } |
855 | 855 | ||
/*
 * Async submit_bio_start hook for btree writes: checksum every tree
 * block in the bio before it goes to disk.
 */
static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}
867 | 867 | ||
868 | static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | 868 | static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, |
869 | int mirror_num, unsigned long bio_flags, | 869 | int mirror_num, unsigned long bio_flags, |
870 | u64 bio_offset) | 870 | u64 bio_offset) |
871 | { | 871 | { |
872 | int ret; | 872 | int ret; |
873 | 873 | ||
874 | /* | 874 | /* |
875 | * when we're called for a write, we're already in the async | 875 | * when we're called for a write, we're already in the async |
876 | * submission context. Just jump into btrfs_map_bio | 876 | * submission context. Just jump into btrfs_map_bio |
877 | */ | 877 | */ |
878 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); | 878 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); |
879 | if (ret) | 879 | if (ret) |
880 | bio_endio(bio, ret); | 880 | bio_endio(bio, ret); |
881 | return ret; | 881 | return ret; |
882 | } | 882 | } |
883 | 883 | ||
884 | static int check_async_write(struct inode *inode, unsigned long bio_flags) | 884 | static int check_async_write(struct inode *inode, unsigned long bio_flags) |
885 | { | 885 | { |
886 | if (bio_flags & EXTENT_BIO_TREE_LOG) | 886 | if (bio_flags & EXTENT_BIO_TREE_LOG) |
887 | return 0; | 887 | return 0; |
888 | #ifdef CONFIG_X86 | 888 | #ifdef CONFIG_X86 |
889 | if (cpu_has_xmm4_2) | 889 | if (cpu_has_xmm4_2) |
890 | return 0; | 890 | return 0; |
891 | #endif | 891 | #endif |
892 | return 1; | 892 | return 1; |
893 | } | 893 | } |
894 | 894 | ||
/*
 * submit_bio hook for the btree inode.
 *
 * Reads are wired to the metadata endio workqueue so checksum validation
 * happens in kernel threads.  Writes are checksummed inline when cheap
 * (see check_async_write()), otherwise routed through the async submit
 * helpers so checksumming runs in parallel across CPUs.  On any error
 * the bio is completed here with that error.
 */
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int async = check_async_write(inode, bio_flags);
	int ret;

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, 1);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num, 0,
					  bio_offset,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	/* the label sits inside the if: fall-through errors land here too */
	if (ret) {
out_w_error:
		bio_endio(bio, ret);
	}
	return ret;
}
937 | 937 | ||
#ifdef CONFIG_MIGRATION
/*
 * Page-migration hook for the btree address space: refuses dirty pages
 * (we can't write them back from here) and pages whose private data
 * can't be released; otherwise falls back to the generic migrate_page().
 */
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif
959 | 959 | ||
960 | 960 | ||
/*
 * Write back dirty pages of the btree inode.
 *
 * For background writeback (WB_SYNC_NONE) the work is skipped when it
 * was triggered by kupdate, or when the amount of dirty metadata is
 * still below BTRFS_DIRTY_METADATA_THRESH -- writing out tiny batches
 * of metadata is wasteful.  Synchronous writeback always proceeds.
 */
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}
981 | 981 | ||
982 | static int btree_readpage(struct file *file, struct page *page) | 982 | static int btree_readpage(struct file *file, struct page *page) |
983 | { | 983 | { |
984 | struct extent_io_tree *tree; | 984 | struct extent_io_tree *tree; |
985 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 985 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
986 | return extent_read_full_page(tree, page, btree_get_extent, 0); | 986 | return extent_read_full_page(tree, page, btree_get_extent, 0); |
987 | } | 987 | } |
988 | 988 | ||
989 | static int btree_releasepage(struct page *page, gfp_t gfp_flags) | 989 | static int btree_releasepage(struct page *page, gfp_t gfp_flags) |
990 | { | 990 | { |
991 | if (PageWriteback(page) || PageDirty(page)) | 991 | if (PageWriteback(page) || PageDirty(page)) |
992 | return 0; | 992 | return 0; |
993 | 993 | ||
994 | return try_release_extent_buffer(page); | 994 | return try_release_extent_buffer(page); |
995 | } | 995 | } |
996 | 996 | ||
/*
 * Invalidate (part of) a btree page, e.g. when the btree inode's page
 * cache is being torn down.  Clears the extent state for the range and
 * tries to release the attached extent buffer.  If page private is
 * still set afterwards, a reference was leaked somewhere: warn and
 * clean it up by hand so the page can go away.
 */
static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		/* drop the page reference that page private was holding */
		page_cache_release(page);
	}
}
1013 | 1013 | ||
/*
 * Mark a btree page dirty.  In DEBUG builds, first assert the
 * invariants required when dirtying metadata: the page must carry a
 * referenced extent buffer that is already flagged dirty and is
 * tree-locked by the caller.
 */
static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}
1028 | 1028 | ||
/* Address space operations for the btree inode's metadata page cache. */
static const struct address_space_operations btree_aops = {
	.readpage = btree_readpage,
	.writepages = btree_writepages,
	.releasepage = btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage = btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};
1039 | 1039 | ||
/*
 * Start readahead of a single tree block and return immediately.
 *
 * The read is submitted with WAIT_NONE and its result is deliberately
 * ignored -- ret always stays 0; readahead failures are harmless.
 * NOTE(review): parent_transid is currently unused in this function.
 */
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}
1055 | 1055 | ||
/*
 * Read one tree block from a specific mirror for the readahead code.
 *
 * Waits only for the page locks (WAIT_PAGE_LOCK), so end-io checksum
 * verification may still be running when this returns.  On success *eb
 * is set only when the buffer became uptodate; otherwise the buffer is
 * dropped and *eb is left untouched.  Returns -EIO for a buffer marked
 * corrupt, the error from the read itself, or 0.
 */
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			     int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;

	/* tell the end-io path this read came from readahead */
	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}
1087 | 1087 | ||
/*
 * Look up an already-cached extent buffer by bytenr, or return NULL.
 * NOTE(review): blocksize is unused; the lookup is keyed by bytenr only.
 */
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	return find_extent_buffer(root->fs_info, bytenr);
}
1093 | 1093 | ||
/*
 * Return the cached extent buffer for @bytenr, allocating a fresh
 * (not yet read) one if it is not in the cache.  NULL on allocation
 * failure.
 */
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
}
1099 | 1099 | ||
1100 | 1100 | ||
/* Start writeback of the pages backing one tree block (does not wait). */
int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}
1106 | 1106 | ||
/* Wait for previously started writeback of one tree block to complete. */
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->pages[0]->mapping,
				       buf->start, buf->start + buf->len - 1);
}
1112 | 1112 | ||
1113 | struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, | 1113 | struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, |
1114 | u32 blocksize, u64 parent_transid) | 1114 | u32 blocksize, u64 parent_transid) |
1115 | { | 1115 | { |
1116 | struct extent_buffer *buf = NULL; | 1116 | struct extent_buffer *buf = NULL; |
1117 | int ret; | 1117 | int ret; |
1118 | 1118 | ||
1119 | buf = btrfs_find_create_tree_block(root, bytenr, blocksize); | 1119 | buf = btrfs_find_create_tree_block(root, bytenr, blocksize); |
1120 | if (!buf) | 1120 | if (!buf) |
1121 | return NULL; | 1121 | return NULL; |
1122 | 1122 | ||
1123 | ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); | 1123 | ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); |
1124 | if (ret) { | 1124 | if (ret) { |
1125 | free_extent_buffer(buf); | 1125 | free_extent_buffer(buf); |
1126 | return NULL; | 1126 | return NULL; |
1127 | } | 1127 | } |
1128 | return buf; | 1128 | return buf; |
1129 | 1129 | ||
1130 | } | 1130 | } |
1131 | 1131 | ||
/*
 * Undo the dirtying of a tree block that was created and then freed
 * within the currently running transaction -- such a block never needs
 * to hit disk.  Blocks dirtied in an earlier transaction are left
 * alone, since they must still be written out.  Clearing the dirty bit
 * also subtracts the block's size from the dirty metadata accounting.
 * The caller must hold the tree lock on @buf.
 */
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
					     -buf->len,
					     fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}
1151 | 1151 | ||
/*
 * Initialize all fields of a freshly allocated btrfs_root to a sane
 * empty state: geometry (node/leaf/sector/stripe sizes), counters,
 * lists, locks, waitqueues and the root key for @objectid.
 *
 * @fs_info may be NULL (used by the sanity-test infrastructure), in
 * which case the dirty_log_pages tree is left uninitialized and
 * defrag_trans_start is 0.
 */
static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			 u32 stripesize, struct btrfs_root *root,
			 struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;
	root->in_radix = 0;
	root->orphan_item_inserted = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	atomic_set(&root->refs, 1);
	root->log_transid = 0;
	root->last_log_commit = 0;
	if (fs_info)
		extent_io_tree_init(&root->dirty_log_pages,
				    fs_info->btree_inode->i_mapping);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	if (fs_info)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}
1227 | 1227 | ||
1228 | static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info) | 1228 | static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info) |
1229 | { | 1229 | { |
1230 | struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS); | 1230 | struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS); |
1231 | if (root) | 1231 | if (root) |
1232 | root->fs_info = fs_info; | 1232 | root->fs_info = fs_info; |
1233 | return root; | 1233 | return root; |
1234 | } | 1234 | } |
1235 | 1235 | ||
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Allocate a stand-alone dummy root for the sanity tests.  It has no
 * fs_info behind it and uses 4K for every block size.  Should only be
 * used by the testing infrastructure.
 */
struct btrfs_root *btrfs_alloc_dummy_root(void)
{
	struct btrfs_root *root = btrfs_alloc_root(NULL);

	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(4096, 4096, 4096, 4096, root, NULL, 1);
	root->dummy_root = 1;

	return root;
}
#endif
1251 | 1251 | ||
/*
 * Create a brand new tree with @objectid: allocate the in-memory root,
 * allocate and initialize its (locked) empty leaf, fill in the root
 * item and insert it into the tree root.
 *
 * Returns the new root with the leaf unlocked, or an ERR_PTR.  On
 * failure all partially set up state (leaf, root) is torn down here.
 */
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	/* the new tree's first (empty) leaf; returned locked */
	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	root->track_dirty = 1;


	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}
1333 | 1333 | ||
/*
 * Allocate an in-memory log tree root with an empty first leaf.
 *
 * Log trees live only in memory until the log is synced; no root item
 * is inserted into the tree root here.  Returns the new root with its
 * leaf unlocked, or an ERR_PTR.
 */
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
	/*
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */
	root->ref_cows = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				      BTRFS_TREE_LOG_OBJECTID, NULL,
				      0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}
1381 | 1381 | ||
1382 | int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, | 1382 | int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, |
1383 | struct btrfs_fs_info *fs_info) | 1383 | struct btrfs_fs_info *fs_info) |
1384 | { | 1384 | { |
1385 | struct btrfs_root *log_root; | 1385 | struct btrfs_root *log_root; |
1386 | 1386 | ||
1387 | log_root = alloc_log_tree(trans, fs_info); | 1387 | log_root = alloc_log_tree(trans, fs_info); |
1388 | if (IS_ERR(log_root)) | 1388 | if (IS_ERR(log_root)) |
1389 | return PTR_ERR(log_root); | 1389 | return PTR_ERR(log_root); |
1390 | WARN_ON(fs_info->log_root_tree); | 1390 | WARN_ON(fs_info->log_root_tree); |
1391 | fs_info->log_root_tree = log_root; | 1391 | fs_info->log_root_tree = log_root; |
1392 | return 0; | 1392 | return 0; |
1393 | } | 1393 | } |
1394 | 1394 | ||
/*
 * Allocate a per-subvolume log tree for @root, initialize its root
 * item (keyed by the subvolume's objectid in the offset field) and
 * attach it as root->log_root.  Returns 0 or a negative errno.
 */
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	/* offset of a log tree's key is the owning subvolume's objectid */
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item, root->leafsize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->last_log_commit = 0;
	return 0;
}
1423 | 1423 | ||
/*
 * Read one tree root, identified by @key, from the root tree.
 *
 * Allocates a fresh btrfs_root, locates its root item with
 * btrfs_find_root() and reads the referenced root node from disk.
 *
 * Returns the root on success, or an ERR_PTR:
 *   -ENOMEM  allocation or tree block read failure
 *   -ENOENT  no root item found for @key
 *   -EIO     root node read but not up to date
 */
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	u32 blocksize;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * Geometry (node/leaf/sector/stripe sizes) is inherited from the
	 * tree root; only the objectid differs.
	 */
	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		/* btrfs_find_root() returns > 0 when the item is missing */
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node) {
		ret = -ENOMEM;
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto read_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

/* unwind in reverse order of acquisition */
read_fail:
	free_extent_buffer(root->node);
find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}
1480 | 1480 | ||
1481 | struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root, | 1481 | struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root, |
1482 | struct btrfs_key *location) | 1482 | struct btrfs_key *location) |
1483 | { | 1483 | { |
1484 | struct btrfs_root *root; | 1484 | struct btrfs_root *root; |
1485 | 1485 | ||
1486 | root = btrfs_read_tree_root(tree_root, location); | 1486 | root = btrfs_read_tree_root(tree_root, location); |
1487 | if (IS_ERR(root)) | 1487 | if (IS_ERR(root)) |
1488 | return root; | 1488 | return root; |
1489 | 1489 | ||
1490 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { | 1490 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { |
1491 | root->ref_cows = 1; | 1491 | root->ref_cows = 1; |
1492 | btrfs_check_and_init_root_item(&root->root_item); | 1492 | btrfs_check_and_init_root_item(&root->root_item); |
1493 | } | 1493 | } |
1494 | 1494 | ||
1495 | return root; | 1495 | return root; |
1496 | } | 1496 | } |
1497 | 1497 | ||
1498 | int btrfs_init_fs_root(struct btrfs_root *root) | 1498 | int btrfs_init_fs_root(struct btrfs_root *root) |
1499 | { | 1499 | { |
1500 | int ret; | 1500 | int ret; |
1501 | 1501 | ||
1502 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); | 1502 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); |
1503 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), | 1503 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), |
1504 | GFP_NOFS); | 1504 | GFP_NOFS); |
1505 | if (!root->free_ino_pinned || !root->free_ino_ctl) { | 1505 | if (!root->free_ino_pinned || !root->free_ino_ctl) { |
1506 | ret = -ENOMEM; | 1506 | ret = -ENOMEM; |
1507 | goto fail; | 1507 | goto fail; |
1508 | } | 1508 | } |
1509 | 1509 | ||
1510 | btrfs_init_free_ino_ctl(root); | 1510 | btrfs_init_free_ino_ctl(root); |
1511 | mutex_init(&root->fs_commit_mutex); | 1511 | mutex_init(&root->fs_commit_mutex); |
1512 | spin_lock_init(&root->cache_lock); | 1512 | spin_lock_init(&root->cache_lock); |
1513 | init_waitqueue_head(&root->cache_wait); | 1513 | init_waitqueue_head(&root->cache_wait); |
1514 | 1514 | ||
1515 | ret = get_anon_bdev(&root->anon_dev); | 1515 | ret = get_anon_bdev(&root->anon_dev); |
1516 | if (ret) | 1516 | if (ret) |
1517 | goto fail; | 1517 | goto fail; |
1518 | return 0; | 1518 | return 0; |
1519 | fail: | 1519 | fail: |
1520 | kfree(root->free_ino_ctl); | 1520 | kfree(root->free_ino_ctl); |
1521 | kfree(root->free_ino_pinned); | 1521 | kfree(root->free_ino_pinned); |
1522 | return ret; | 1522 | return ret; |
1523 | } | 1523 | } |
1524 | 1524 | ||
1525 | static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, | 1525 | static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, |
1526 | u64 root_id) | 1526 | u64 root_id) |
1527 | { | 1527 | { |
1528 | struct btrfs_root *root; | 1528 | struct btrfs_root *root; |
1529 | 1529 | ||
1530 | spin_lock(&fs_info->fs_roots_radix_lock); | 1530 | spin_lock(&fs_info->fs_roots_radix_lock); |
1531 | root = radix_tree_lookup(&fs_info->fs_roots_radix, | 1531 | root = radix_tree_lookup(&fs_info->fs_roots_radix, |
1532 | (unsigned long)root_id); | 1532 | (unsigned long)root_id); |
1533 | spin_unlock(&fs_info->fs_roots_radix_lock); | 1533 | spin_unlock(&fs_info->fs_roots_radix_lock); |
1534 | return root; | 1534 | return root; |
1535 | } | 1535 | } |
1536 | 1536 | ||
/*
 * Insert @root into the fs_roots_radix cache, keyed by its objectid.
 *
 * Returns 0 on success, -EEXIST when a root with the same objectid is
 * already cached, or -ENOMEM if the radix tree preload fails.
 */
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	/* preload outside the spinlock so the insert below cannot sleep */
	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		root->in_radix = 1;
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}
1557 | 1557 | ||
/*
 * Look up (and cache) the root for @location.
 *
 * The global trees (root, extent, chunk, dev, csum, quota, uuid) are
 * returned directly from fs_info.  Any other root is first looked up in
 * the fs_roots_radix cache and, on a miss, read from disk, initialized
 * and inserted into the cache.
 *
 * When @check_ref is true, roots whose root item refcount is zero
 * (deleted subvolumes) are rejected with ERR_PTR(-ENOENT).
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	/* quota and uuid trees are optional; may not exist on this fs */
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	/* remember whether this root still has an orphan item pending */
	ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
			location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		root->orphan_item_inserted = 1;

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			/* lost the race with a concurrent lookup: retry
			 * and use the root that won */
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}
1622 | 1622 | ||
1623 | static int btrfs_congested_fn(void *congested_data, int bdi_bits) | 1623 | static int btrfs_congested_fn(void *congested_data, int bdi_bits) |
1624 | { | 1624 | { |
1625 | struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; | 1625 | struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; |
1626 | int ret = 0; | 1626 | int ret = 0; |
1627 | struct btrfs_device *device; | 1627 | struct btrfs_device *device; |
1628 | struct backing_dev_info *bdi; | 1628 | struct backing_dev_info *bdi; |
1629 | 1629 | ||
1630 | rcu_read_lock(); | 1630 | rcu_read_lock(); |
1631 | list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { | 1631 | list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { |
1632 | if (!device->bdev) | 1632 | if (!device->bdev) |
1633 | continue; | 1633 | continue; |
1634 | bdi = blk_get_backing_dev_info(device->bdev); | 1634 | bdi = blk_get_backing_dev_info(device->bdev); |
1635 | if (bdi && bdi_congested(bdi, bdi_bits)) { | 1635 | if (bdi && bdi_congested(bdi, bdi_bits)) { |
1636 | ret = 1; | 1636 | ret = 1; |
1637 | break; | 1637 | break; |
1638 | } | 1638 | } |
1639 | } | 1639 | } |
1640 | rcu_read_unlock(); | 1640 | rcu_read_unlock(); |
1641 | return ret; | 1641 | return ret; |
1642 | } | 1642 | } |
1643 | 1643 | ||
/*
 * Set up the per-filesystem backing_dev_info: register it under the
 * "btrfs" name and wire up the congestion callback/data.
 *
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	bdi->capabilities = BDI_CAP_MAP_COPY;
	err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	/* inherit the system default readahead window */
	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}
1662 | 1662 | ||
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions. This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;

	/*
	 * Restore the submitter's original completion context that was
	 * stashed in the wrapper, then free the wrapper before completing
	 * the bio — nothing below needs end_io_wq anymore.
	 */
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
	bio_endio_nodec(bio, error);
}
1682 | 1682 | ||
/*
 * Background thread that performs deferred cleanup: delayed inode iputs,
 * removal of deleted snapshots/subvolumes and inode auto-defrag.  Sleeps
 * when there is nothing to do and loops again immediately while snapshot
 * deletion reports more work.
 */
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	int again;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(root))
			goto sleep;

		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(root)) {
			mutex_unlock(&root->fs_info->cleaner_mutex);
			goto sleep;
		}

		btrfs_run_delayed_iputs(root);
		/* nonzero return means more snapshots may be pending */
		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(root->fs_info);
sleep:
		/* only sleep when this pass made no progress */
		if (!try_to_freeze() && !again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
1726 | 1726 | ||
/*
 * Background thread that periodically commits the running transaction.
 * It samples the current transaction under trans_lock, lets young,
 * unblocked transactions age (re-checking after 5s), and otherwise
 * attaches to and commits the sampled transaction.  Each pass also
 * kicks the cleaner kthread.
 */
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * root->fs_info->commit_interval;
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    (now < cur->start_time ||
		     now - cur->start_time < root->fs_info->commit_interval)) {
			spin_unlock(&root->fs_info->trans_lock);
			/* transaction is still young: re-check sooner */
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&root->fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		/*
		 * Only commit if this is still the transaction sampled
		 * above; otherwise someone else committed it already.
		 */
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans, root);
		} else {
			btrfs_end_transaction(trans, root);
		}
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &root->fs_info->fs_state)))
			btrfs_cleanup_transaction(root);
		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    (!btrfs_transaction_blocked(root->fs_info) ||
			     cannot_commit))
				schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
1790 | 1790 | ||
1791 | /* | 1791 | /* |
1792 | * this will find the highest generation in the array of | 1792 | * this will find the highest generation in the array of |
1793 | * root backups. The index of the highest array is returned, | 1793 | * root backups. The index of the highest array is returned, |
1794 | * or -1 if we can't find anything. | 1794 | * or -1 if we can't find anything. |
1795 | * | 1795 | * |
1796 | * We check to make sure the array is valid by comparing the | 1796 | * We check to make sure the array is valid by comparing the |
1797 | * generation of the latest root in the array with the generation | 1797 | * generation of the latest root in the array with the generation |
1798 | * in the super block. If they don't match we pitch it. | 1798 | * in the super block. If they don't match we pitch it. |
1799 | */ | 1799 | */ |
1800 | static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) | 1800 | static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) |
1801 | { | 1801 | { |
1802 | u64 cur; | 1802 | u64 cur; |
1803 | int newest_index = -1; | 1803 | int newest_index = -1; |
1804 | struct btrfs_root_backup *root_backup; | 1804 | struct btrfs_root_backup *root_backup; |
1805 | int i; | 1805 | int i; |
1806 | 1806 | ||
1807 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { | 1807 | for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { |
1808 | root_backup = info->super_copy->super_roots + i; | 1808 | root_backup = info->super_copy->super_roots + i; |
1809 | cur = btrfs_backup_tree_root_gen(root_backup); | 1809 | cur = btrfs_backup_tree_root_gen(root_backup); |
1810 | if (cur == newest_gen) | 1810 | if (cur == newest_gen) |
1811 | newest_index = i; | 1811 | newest_index = i; |
1812 | } | 1812 | } |
1813 | 1813 | ||
1814 | /* check to see if we actually wrapped around */ | 1814 | /* check to see if we actually wrapped around */ |
1815 | if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { | 1815 | if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { |
1816 | root_backup = info->super_copy->super_roots; | 1816 | root_backup = info->super_copy->super_roots; |
1817 | cur = btrfs_backup_tree_root_gen(root_backup); | 1817 | cur = btrfs_backup_tree_root_gen(root_backup); |
1818 | if (cur == newest_gen) | 1818 | if (cur == newest_gen) |
1819 | newest_index = 0; | 1819 | newest_index = 0; |
1820 | } | 1820 | } |
1821 | return newest_index; | 1821 | return newest_index; |
1822 | } | 1822 | } |
1823 | 1823 | ||
1824 | 1824 | ||
1825 | /* | 1825 | /* |
1826 | * find the oldest backup so we know where to store new entries | 1826 | * find the oldest backup so we know where to store new entries |
1827 | * in the backup array. This will set the backup_root_index | 1827 | * in the backup array. This will set the backup_root_index |
1828 | * field in the fs_info struct | 1828 | * field in the fs_info struct |
1829 | */ | 1829 | */ |
1830 | static void find_oldest_super_backup(struct btrfs_fs_info *info, | 1830 | static void find_oldest_super_backup(struct btrfs_fs_info *info, |
1831 | u64 newest_gen) | 1831 | u64 newest_gen) |
1832 | { | 1832 | { |
1833 | int newest_index = -1; | 1833 | int newest_index = -1; |
1834 | 1834 | ||
1835 | newest_index = find_newest_super_backup(info, newest_gen); | 1835 | newest_index = find_newest_super_backup(info, newest_gen); |
1836 | /* if there was garbage in there, just move along */ | 1836 | /* if there was garbage in there, just move along */ |
1837 | if (newest_index == -1) { | 1837 | if (newest_index == -1) { |
1838 | info->backup_root_index = 0; | 1838 | info->backup_root_index = 0; |
1839 | } else { | 1839 | } else { |
1840 | info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; | 1840 | info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; |
1841 | } | 1841 | } |
1842 | } | 1842 | } |
1843 | 1843 | ||
1844 | /* | 1844 | /* |
1845 | * copy all the root pointers into the super backup array. | 1845 | * copy all the root pointers into the super backup array. |
1846 | * this will bump the backup pointer by one when it is | 1846 | * this will bump the backup pointer by one when it is |
1847 | * done | 1847 | * done |
1848 | */ | 1848 | */ |
1849 | static void backup_super_roots(struct btrfs_fs_info *info) | 1849 | static void backup_super_roots(struct btrfs_fs_info *info) |
1850 | { | 1850 | { |
1851 | int next_backup; | 1851 | int next_backup; |
1852 | struct btrfs_root_backup *root_backup; | 1852 | struct btrfs_root_backup *root_backup; |
1853 | int last_backup; | 1853 | int last_backup; |
1854 | 1854 | ||
1855 | next_backup = info->backup_root_index; | 1855 | next_backup = info->backup_root_index; |
1856 | last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % | 1856 | last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % |
1857 | BTRFS_NUM_BACKUP_ROOTS; | 1857 | BTRFS_NUM_BACKUP_ROOTS; |
1858 | 1858 | ||
1859 | /* | 1859 | /* |
1860 | * just overwrite the last backup if we're at the same generation | 1860 | * just overwrite the last backup if we're at the same generation |
1861 | * this happens only at umount | 1861 | * this happens only at umount |
1862 | */ | 1862 | */ |
1863 | root_backup = info->super_for_commit->super_roots + last_backup; | 1863 | root_backup = info->super_for_commit->super_roots + last_backup; |
1864 | if (btrfs_backup_tree_root_gen(root_backup) == | 1864 | if (btrfs_backup_tree_root_gen(root_backup) == |
1865 | btrfs_header_generation(info->tree_root->node)) | 1865 | btrfs_header_generation(info->tree_root->node)) |
1866 | next_backup = last_backup; | 1866 | next_backup = last_backup; |
1867 | 1867 | ||
1868 | root_backup = info->super_for_commit->super_roots + next_backup; | 1868 | root_backup = info->super_for_commit->super_roots + next_backup; |
1869 | 1869 | ||
1870 | /* | 1870 | /* |
1871 | * make sure all of our padding and empty slots get zero filled | 1871 | * make sure all of our padding and empty slots get zero filled |
1872 | * regardless of which ones we use today | 1872 | * regardless of which ones we use today |
1873 | */ | 1873 | */ |
1874 | memset(root_backup, 0, sizeof(*root_backup)); | 1874 | memset(root_backup, 0, sizeof(*root_backup)); |
1875 | 1875 | ||
1876 | info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; | 1876 | info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; |
1877 | 1877 | ||
1878 | btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); | 1878 | btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); |
1879 | btrfs_set_backup_tree_root_gen(root_backup, | 1879 | btrfs_set_backup_tree_root_gen(root_backup, |
1880 | btrfs_header_generation(info->tree_root->node)); | 1880 | btrfs_header_generation(info->tree_root->node)); |
1881 | 1881 | ||
1882 | btrfs_set_backup_tree_root_level(root_backup, | 1882 | btrfs_set_backup_tree_root_level(root_backup, |
1883 | btrfs_header_level(info->tree_root->node)); | 1883 | btrfs_header_level(info->tree_root->node)); |
1884 | 1884 | ||
1885 | btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); | 1885 | btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); |
1886 | btrfs_set_backup_chunk_root_gen(root_backup, | 1886 | btrfs_set_backup_chunk_root_gen(root_backup, |
1887 | btrfs_header_generation(info->chunk_root->node)); | 1887 | btrfs_header_generation(info->chunk_root->node)); |
1888 | btrfs_set_backup_chunk_root_level(root_backup, | 1888 | btrfs_set_backup_chunk_root_level(root_backup, |
1889 | btrfs_header_level(info->chunk_root->node)); | 1889 | btrfs_header_level(info->chunk_root->node)); |
1890 | 1890 | ||
1891 | btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); | 1891 | btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); |
1892 | btrfs_set_backup_extent_root_gen(root_backup, | 1892 | btrfs_set_backup_extent_root_gen(root_backup, |
1893 | btrfs_header_generation(info->extent_root->node)); | 1893 | btrfs_header_generation(info->extent_root->node)); |
1894 | btrfs_set_backup_extent_root_level(root_backup, | 1894 | btrfs_set_backup_extent_root_level(root_backup, |
1895 | btrfs_header_level(info->extent_root->node)); | 1895 | btrfs_header_level(info->extent_root->node)); |
1896 | 1896 | ||
1897 | /* | 1897 | /* |
1898 | * we might commit during log recovery, which happens before we set | 1898 | * we might commit during log recovery, which happens before we set |
1899 | * the fs_root. Make sure it is valid before we fill it in. | 1899 | * the fs_root. Make sure it is valid before we fill it in. |
1900 | */ | 1900 | */ |
1901 | if (info->fs_root && info->fs_root->node) { | 1901 | if (info->fs_root && info->fs_root->node) { |
1902 | btrfs_set_backup_fs_root(root_backup, | 1902 | btrfs_set_backup_fs_root(root_backup, |
1903 | info->fs_root->node->start); | 1903 | info->fs_root->node->start); |
1904 | btrfs_set_backup_fs_root_gen(root_backup, | 1904 | btrfs_set_backup_fs_root_gen(root_backup, |
1905 | btrfs_header_generation(info->fs_root->node)); | 1905 | btrfs_header_generation(info->fs_root->node)); |
1906 | btrfs_set_backup_fs_root_level(root_backup, | 1906 | btrfs_set_backup_fs_root_level(root_backup, |
1907 | btrfs_header_level(info->fs_root->node)); | 1907 | btrfs_header_level(info->fs_root->node)); |
1908 | } | 1908 | } |
1909 | 1909 | ||
1910 | btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); | 1910 | btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); |
1911 | btrfs_set_backup_dev_root_gen(root_backup, | 1911 | btrfs_set_backup_dev_root_gen(root_backup, |
1912 | btrfs_header_generation(info->dev_root->node)); | 1912 | btrfs_header_generation(info->dev_root->node)); |
1913 | btrfs_set_backup_dev_root_level(root_backup, | 1913 | btrfs_set_backup_dev_root_level(root_backup, |
1914 | btrfs_header_level(info->dev_root->node)); | 1914 | btrfs_header_level(info->dev_root->node)); |
1915 | 1915 | ||
1916 | btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); | 1916 | btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); |
1917 | btrfs_set_backup_csum_root_gen(root_backup, | 1917 | btrfs_set_backup_csum_root_gen(root_backup, |
1918 | btrfs_header_generation(info->csum_root->node)); | 1918 | btrfs_header_generation(info->csum_root->node)); |
1919 | btrfs_set_backup_csum_root_level(root_backup, | 1919 | btrfs_set_backup_csum_root_level(root_backup, |
1920 | btrfs_header_level(info->csum_root->node)); | 1920 | btrfs_header_level(info->csum_root->node)); |
1921 | 1921 | ||
1922 | btrfs_set_backup_total_bytes(root_backup, | 1922 | btrfs_set_backup_total_bytes(root_backup, |
1923 | btrfs_super_total_bytes(info->super_copy)); | 1923 | btrfs_super_total_bytes(info->super_copy)); |
1924 | btrfs_set_backup_bytes_used(root_backup, | 1924 | btrfs_set_backup_bytes_used(root_backup, |
1925 | btrfs_super_bytes_used(info->super_copy)); | 1925 | btrfs_super_bytes_used(info->super_copy)); |
1926 | btrfs_set_backup_num_devices(root_backup, | 1926 | btrfs_set_backup_num_devices(root_backup, |
1927 | btrfs_super_num_devices(info->super_copy)); | 1927 | btrfs_super_num_devices(info->super_copy)); |
1928 | 1928 | ||
1929 | /* | 1929 | /* |
1930 | * if we don't copy this out to the super_copy, it won't get remembered | 1930 | * if we don't copy this out to the super_copy, it won't get remembered |
1931 | * for the next commit | 1931 | * for the next commit |
1932 | */ | 1932 | */ |
1933 | memcpy(&info->super_copy->super_roots, | 1933 | memcpy(&info->super_copy->super_roots, |
1934 | &info->super_for_commit->super_roots, | 1934 | &info->super_for_commit->super_roots, |
1935 | sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); | 1935 | sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); |
1936 | } | 1936 | } |
1937 | 1937 | ||
1938 | /* | 1938 | /* |
1939 | * this copies info out of the root backup array and back into | 1939 | * this copies info out of the root backup array and back into |
1940 | * the in-memory super block. It is meant to help iterate through | 1940 | * the in-memory super block. It is meant to help iterate through |
1941 | * the array, so you send it the number of backups you've already | 1941 | * the array, so you send it the number of backups you've already |
1942 | * tried and the last backup index you used. | 1942 | * tried and the last backup index you used. |
1943 | * | 1943 | * |
1944 | * this returns -1 when it has tried all the backups | 1944 | * this returns -1 when it has tried all the backups |
1945 | */ | 1945 | */ |
1946 | static noinline int next_root_backup(struct btrfs_fs_info *info, | 1946 | static noinline int next_root_backup(struct btrfs_fs_info *info, |
1947 | struct btrfs_super_block *super, | 1947 | struct btrfs_super_block *super, |
1948 | int *num_backups_tried, int *backup_index) | 1948 | int *num_backups_tried, int *backup_index) |
1949 | { | 1949 | { |
1950 | struct btrfs_root_backup *root_backup; | 1950 | struct btrfs_root_backup *root_backup; |
1951 | int newest = *backup_index; | 1951 | int newest = *backup_index; |
1952 | 1952 | ||
1953 | if (*num_backups_tried == 0) { | 1953 | if (*num_backups_tried == 0) { |
1954 | u64 gen = btrfs_super_generation(super); | 1954 | u64 gen = btrfs_super_generation(super); |
1955 | 1955 | ||
1956 | newest = find_newest_super_backup(info, gen); | 1956 | newest = find_newest_super_backup(info, gen); |
1957 | if (newest == -1) | 1957 | if (newest == -1) |
1958 | return -1; | 1958 | return -1; |
1959 | 1959 | ||
1960 | *backup_index = newest; | 1960 | *backup_index = newest; |
1961 | *num_backups_tried = 1; | 1961 | *num_backups_tried = 1; |
1962 | } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { | 1962 | } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { |
1963 | /* we've tried all the backups, all done */ | 1963 | /* we've tried all the backups, all done */ |
1964 | return -1; | 1964 | return -1; |
1965 | } else { | 1965 | } else { |
1966 | /* jump to the next oldest backup */ | 1966 | /* jump to the next oldest backup */ |
1967 | newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % | 1967 | newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % |
1968 | BTRFS_NUM_BACKUP_ROOTS; | 1968 | BTRFS_NUM_BACKUP_ROOTS; |
1969 | *backup_index = newest; | 1969 | *backup_index = newest; |
1970 | *num_backups_tried += 1; | 1970 | *num_backups_tried += 1; |
1971 | } | 1971 | } |
1972 | root_backup = super->super_roots + newest; | 1972 | root_backup = super->super_roots + newest; |
1973 | 1973 | ||
1974 | btrfs_set_super_generation(super, | 1974 | btrfs_set_super_generation(super, |
1975 | btrfs_backup_tree_root_gen(root_backup)); | 1975 | btrfs_backup_tree_root_gen(root_backup)); |
1976 | btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); | 1976 | btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); |
1977 | btrfs_set_super_root_level(super, | 1977 | btrfs_set_super_root_level(super, |
1978 | btrfs_backup_tree_root_level(root_backup)); | 1978 | btrfs_backup_tree_root_level(root_backup)); |
1979 | btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); | 1979 | btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); |
1980 | 1980 | ||
1981 | /* | 1981 | /* |
1982 | * fixme: the total bytes and num_devices need to match or we should | 1982 | * fixme: the total bytes and num_devices need to match or we should |
1983 | * need a fsck | 1983 | * need a fsck |
1984 | */ | 1984 | */ |
1985 | btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); | 1985 | btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); |
1986 | btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); | 1986 | btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); |
1987 | return 0; | 1987 | return 0; |
1988 | } | 1988 | } |
1989 | 1989 | ||
1990 | /* helper to cleanup workers */ | 1990 | /* helper to cleanup workers */ |
1991 | static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) | 1991 | static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) |
1992 | { | 1992 | { |
1993 | btrfs_stop_workers(&fs_info->generic_worker); | 1993 | btrfs_stop_workers(&fs_info->generic_worker); |
1994 | btrfs_stop_workers(&fs_info->fixup_workers); | 1994 | btrfs_stop_workers(&fs_info->fixup_workers); |
1995 | btrfs_stop_workers(&fs_info->delalloc_workers); | 1995 | btrfs_stop_workers(&fs_info->delalloc_workers); |
1996 | btrfs_stop_workers(&fs_info->workers); | 1996 | btrfs_stop_workers(&fs_info->workers); |
1997 | btrfs_stop_workers(&fs_info->endio_workers); | 1997 | btrfs_stop_workers(&fs_info->endio_workers); |
1998 | btrfs_stop_workers(&fs_info->endio_meta_workers); | 1998 | btrfs_stop_workers(&fs_info->endio_meta_workers); |
1999 | btrfs_stop_workers(&fs_info->endio_raid56_workers); | 1999 | btrfs_stop_workers(&fs_info->endio_raid56_workers); |
2000 | btrfs_stop_workers(&fs_info->rmw_workers); | 2000 | btrfs_stop_workers(&fs_info->rmw_workers); |
2001 | btrfs_stop_workers(&fs_info->endio_meta_write_workers); | 2001 | btrfs_stop_workers(&fs_info->endio_meta_write_workers); |
2002 | btrfs_stop_workers(&fs_info->endio_write_workers); | 2002 | btrfs_stop_workers(&fs_info->endio_write_workers); |
2003 | btrfs_stop_workers(&fs_info->endio_freespace_worker); | 2003 | btrfs_stop_workers(&fs_info->endio_freespace_worker); |
2004 | btrfs_stop_workers(&fs_info->submit_workers); | 2004 | btrfs_stop_workers(&fs_info->submit_workers); |
2005 | btrfs_stop_workers(&fs_info->delayed_workers); | 2005 | btrfs_stop_workers(&fs_info->delayed_workers); |
2006 | btrfs_stop_workers(&fs_info->caching_workers); | 2006 | btrfs_stop_workers(&fs_info->caching_workers); |
2007 | btrfs_stop_workers(&fs_info->readahead_workers); | 2007 | btrfs_stop_workers(&fs_info->readahead_workers); |
2008 | btrfs_stop_workers(&fs_info->flush_workers); | 2008 | btrfs_stop_workers(&fs_info->flush_workers); |
2009 | btrfs_stop_workers(&fs_info->qgroup_rescan_workers); | 2009 | btrfs_stop_workers(&fs_info->qgroup_rescan_workers); |
2010 | } | 2010 | } |
2011 | 2011 | ||
2012 | static void free_root_extent_buffers(struct btrfs_root *root) | 2012 | static void free_root_extent_buffers(struct btrfs_root *root) |
2013 | { | 2013 | { |
2014 | if (root) { | 2014 | if (root) { |
2015 | free_extent_buffer(root->node); | 2015 | free_extent_buffer(root->node); |
2016 | free_extent_buffer(root->commit_root); | 2016 | free_extent_buffer(root->commit_root); |
2017 | root->node = NULL; | 2017 | root->node = NULL; |
2018 | root->commit_root = NULL; | 2018 | root->commit_root = NULL; |
2019 | } | 2019 | } |
2020 | } | 2020 | } |
2021 | 2021 | ||
2022 | /* helper to cleanup tree roots */ | 2022 | /* helper to cleanup tree roots */ |
2023 | static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) | 2023 | static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) |
2024 | { | 2024 | { |
2025 | free_root_extent_buffers(info->tree_root); | 2025 | free_root_extent_buffers(info->tree_root); |
2026 | 2026 | ||
2027 | free_root_extent_buffers(info->dev_root); | 2027 | free_root_extent_buffers(info->dev_root); |
2028 | free_root_extent_buffers(info->extent_root); | 2028 | free_root_extent_buffers(info->extent_root); |
2029 | free_root_extent_buffers(info->csum_root); | 2029 | free_root_extent_buffers(info->csum_root); |
2030 | free_root_extent_buffers(info->quota_root); | 2030 | free_root_extent_buffers(info->quota_root); |
2031 | free_root_extent_buffers(info->uuid_root); | 2031 | free_root_extent_buffers(info->uuid_root); |
2032 | if (chunk_root) | 2032 | if (chunk_root) |
2033 | free_root_extent_buffers(info->chunk_root); | 2033 | free_root_extent_buffers(info->chunk_root); |
2034 | } | 2034 | } |
2035 | 2035 | ||
2036 | static void del_fs_roots(struct btrfs_fs_info *fs_info) | 2036 | static void del_fs_roots(struct btrfs_fs_info *fs_info) |
2037 | { | 2037 | { |
2038 | int ret; | 2038 | int ret; |
2039 | struct btrfs_root *gang[8]; | 2039 | struct btrfs_root *gang[8]; |
2040 | int i; | 2040 | int i; |
2041 | 2041 | ||
2042 | while (!list_empty(&fs_info->dead_roots)) { | 2042 | while (!list_empty(&fs_info->dead_roots)) { |
2043 | gang[0] = list_entry(fs_info->dead_roots.next, | 2043 | gang[0] = list_entry(fs_info->dead_roots.next, |
2044 | struct btrfs_root, root_list); | 2044 | struct btrfs_root, root_list); |
2045 | list_del(&gang[0]->root_list); | 2045 | list_del(&gang[0]->root_list); |
2046 | 2046 | ||
2047 | if (gang[0]->in_radix) { | 2047 | if (gang[0]->in_radix) { |
2048 | btrfs_drop_and_free_fs_root(fs_info, gang[0]); | 2048 | btrfs_drop_and_free_fs_root(fs_info, gang[0]); |
2049 | } else { | 2049 | } else { |
2050 | free_extent_buffer(gang[0]->node); | 2050 | free_extent_buffer(gang[0]->node); |
2051 | free_extent_buffer(gang[0]->commit_root); | 2051 | free_extent_buffer(gang[0]->commit_root); |
2052 | btrfs_put_fs_root(gang[0]); | 2052 | btrfs_put_fs_root(gang[0]); |
2053 | } | 2053 | } |
2054 | } | 2054 | } |
2055 | 2055 | ||
2056 | while (1) { | 2056 | while (1) { |
2057 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, | 2057 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
2058 | (void **)gang, 0, | 2058 | (void **)gang, 0, |
2059 | ARRAY_SIZE(gang)); | 2059 | ARRAY_SIZE(gang)); |
2060 | if (!ret) | 2060 | if (!ret) |
2061 | break; | 2061 | break; |
2062 | for (i = 0; i < ret; i++) | 2062 | for (i = 0; i < ret; i++) |
2063 | btrfs_drop_and_free_fs_root(fs_info, gang[i]); | 2063 | btrfs_drop_and_free_fs_root(fs_info, gang[i]); |
2064 | } | 2064 | } |
2065 | 2065 | ||
2066 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { | 2066 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
2067 | btrfs_free_log_root_tree(NULL, fs_info); | 2067 | btrfs_free_log_root_tree(NULL, fs_info); |
2068 | btrfs_destroy_pinned_extent(fs_info->tree_root, | 2068 | btrfs_destroy_pinned_extent(fs_info->tree_root, |
2069 | fs_info->pinned_extents); | 2069 | fs_info->pinned_extents); |
2070 | } | 2070 | } |
2071 | } | 2071 | } |
2072 | 2072 | ||
2073 | int open_ctree(struct super_block *sb, | 2073 | int open_ctree(struct super_block *sb, |
2074 | struct btrfs_fs_devices *fs_devices, | 2074 | struct btrfs_fs_devices *fs_devices, |
2075 | char *options) | 2075 | char *options) |
2076 | { | 2076 | { |
2077 | u32 sectorsize; | 2077 | u32 sectorsize; |
2078 | u32 nodesize; | 2078 | u32 nodesize; |
2079 | u32 leafsize; | 2079 | u32 leafsize; |
2080 | u32 blocksize; | 2080 | u32 blocksize; |
2081 | u32 stripesize; | 2081 | u32 stripesize; |
2082 | u64 generation; | 2082 | u64 generation; |
2083 | u64 features; | 2083 | u64 features; |
2084 | struct btrfs_key location; | 2084 | struct btrfs_key location; |
2085 | struct buffer_head *bh; | 2085 | struct buffer_head *bh; |
2086 | struct btrfs_super_block *disk_super; | 2086 | struct btrfs_super_block *disk_super; |
2087 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); | 2087 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
2088 | struct btrfs_root *tree_root; | 2088 | struct btrfs_root *tree_root; |
2089 | struct btrfs_root *extent_root; | 2089 | struct btrfs_root *extent_root; |
2090 | struct btrfs_root *csum_root; | 2090 | struct btrfs_root *csum_root; |
2091 | struct btrfs_root *chunk_root; | 2091 | struct btrfs_root *chunk_root; |
2092 | struct btrfs_root *dev_root; | 2092 | struct btrfs_root *dev_root; |
2093 | struct btrfs_root *quota_root; | 2093 | struct btrfs_root *quota_root; |
2094 | struct btrfs_root *uuid_root; | 2094 | struct btrfs_root *uuid_root; |
2095 | struct btrfs_root *log_tree_root; | 2095 | struct btrfs_root *log_tree_root; |
2096 | int ret; | 2096 | int ret; |
2097 | int err = -EINVAL; | 2097 | int err = -EINVAL; |
2098 | int num_backups_tried = 0; | 2098 | int num_backups_tried = 0; |
2099 | int backup_index = 0; | 2099 | int backup_index = 0; |
2100 | bool create_uuid_tree; | 2100 | bool create_uuid_tree; |
2101 | bool check_uuid_tree; | 2101 | bool check_uuid_tree; |
2102 | 2102 | ||
2103 | tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info); | 2103 | tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info); |
2104 | chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info); | 2104 | chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info); |
2105 | if (!tree_root || !chunk_root) { | 2105 | if (!tree_root || !chunk_root) { |
2106 | err = -ENOMEM; | 2106 | err = -ENOMEM; |
2107 | goto fail; | 2107 | goto fail; |
2108 | } | 2108 | } |
2109 | 2109 | ||
2110 | ret = init_srcu_struct(&fs_info->subvol_srcu); | 2110 | ret = init_srcu_struct(&fs_info->subvol_srcu); |
2111 | if (ret) { | 2111 | if (ret) { |
2112 | err = ret; | 2112 | err = ret; |
2113 | goto fail; | 2113 | goto fail; |
2114 | } | 2114 | } |
2115 | 2115 | ||
2116 | ret = setup_bdi(fs_info, &fs_info->bdi); | 2116 | ret = setup_bdi(fs_info, &fs_info->bdi); |
2117 | if (ret) { | 2117 | if (ret) { |
2118 | err = ret; | 2118 | err = ret; |
2119 | goto fail_srcu; | 2119 | goto fail_srcu; |
2120 | } | 2120 | } |
2121 | 2121 | ||
2122 | ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0); | 2122 | ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0); |
2123 | if (ret) { | 2123 | if (ret) { |
2124 | err = ret; | 2124 | err = ret; |
2125 | goto fail_bdi; | 2125 | goto fail_bdi; |
2126 | } | 2126 | } |
2127 | fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * | 2127 | fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * |
2128 | (1 + ilog2(nr_cpu_ids)); | 2128 | (1 + ilog2(nr_cpu_ids)); |
2129 | 2129 | ||
2130 | ret = percpu_counter_init(&fs_info->delalloc_bytes, 0); | 2130 | ret = percpu_counter_init(&fs_info->delalloc_bytes, 0); |
2131 | if (ret) { | 2131 | if (ret) { |
2132 | err = ret; | 2132 | err = ret; |
2133 | goto fail_dirty_metadata_bytes; | 2133 | goto fail_dirty_metadata_bytes; |
2134 | } | 2134 | } |
2135 | 2135 | ||
2136 | fs_info->btree_inode = new_inode(sb); | 2136 | fs_info->btree_inode = new_inode(sb); |
2137 | if (!fs_info->btree_inode) { | 2137 | if (!fs_info->btree_inode) { |
2138 | err = -ENOMEM; | 2138 | err = -ENOMEM; |
2139 | goto fail_delalloc_bytes; | 2139 | goto fail_delalloc_bytes; |
2140 | } | 2140 | } |
2141 | 2141 | ||
2142 | mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); | 2142 | mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); |
2143 | 2143 | ||
2144 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); | 2144 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
2145 | INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); | 2145 | INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); |
2146 | INIT_LIST_HEAD(&fs_info->trans_list); | 2146 | INIT_LIST_HEAD(&fs_info->trans_list); |
2147 | INIT_LIST_HEAD(&fs_info->dead_roots); | 2147 | INIT_LIST_HEAD(&fs_info->dead_roots); |
2148 | INIT_LIST_HEAD(&fs_info->delayed_iputs); | 2148 | INIT_LIST_HEAD(&fs_info->delayed_iputs); |
2149 | INIT_LIST_HEAD(&fs_info->delalloc_roots); | 2149 | INIT_LIST_HEAD(&fs_info->delalloc_roots); |
2150 | INIT_LIST_HEAD(&fs_info->caching_block_groups); | 2150 | INIT_LIST_HEAD(&fs_info->caching_block_groups); |
2151 | spin_lock_init(&fs_info->delalloc_root_lock); | 2151 | spin_lock_init(&fs_info->delalloc_root_lock); |
2152 | spin_lock_init(&fs_info->trans_lock); | 2152 | spin_lock_init(&fs_info->trans_lock); |
2153 | spin_lock_init(&fs_info->fs_roots_radix_lock); | 2153 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
2154 | spin_lock_init(&fs_info->delayed_iput_lock); | 2154 | spin_lock_init(&fs_info->delayed_iput_lock); |
2155 | spin_lock_init(&fs_info->defrag_inodes_lock); | 2155 | spin_lock_init(&fs_info->defrag_inodes_lock); |
2156 | spin_lock_init(&fs_info->free_chunk_lock); | 2156 | spin_lock_init(&fs_info->free_chunk_lock); |
2157 | spin_lock_init(&fs_info->tree_mod_seq_lock); | 2157 | spin_lock_init(&fs_info->tree_mod_seq_lock); |
2158 | spin_lock_init(&fs_info->super_lock); | 2158 | spin_lock_init(&fs_info->super_lock); |
2159 | spin_lock_init(&fs_info->buffer_lock); | 2159 | spin_lock_init(&fs_info->buffer_lock); |
2160 | rwlock_init(&fs_info->tree_mod_log_lock); | 2160 | rwlock_init(&fs_info->tree_mod_log_lock); |
2161 | mutex_init(&fs_info->reloc_mutex); | 2161 | mutex_init(&fs_info->reloc_mutex); |
2162 | seqlock_init(&fs_info->profiles_lock); | 2162 | seqlock_init(&fs_info->profiles_lock); |
2163 | 2163 | ||
2164 | init_completion(&fs_info->kobj_unregister); | 2164 | init_completion(&fs_info->kobj_unregister); |
2165 | INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); | 2165 | INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); |
2166 | INIT_LIST_HEAD(&fs_info->space_info); | 2166 | INIT_LIST_HEAD(&fs_info->space_info); |
2167 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); | 2167 | INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); |
2168 | btrfs_mapping_init(&fs_info->mapping_tree); | 2168 | btrfs_mapping_init(&fs_info->mapping_tree); |
2169 | btrfs_init_block_rsv(&fs_info->global_block_rsv, | 2169 | btrfs_init_block_rsv(&fs_info->global_block_rsv, |
2170 | BTRFS_BLOCK_RSV_GLOBAL); | 2170 | BTRFS_BLOCK_RSV_GLOBAL); |
2171 | btrfs_init_block_rsv(&fs_info->delalloc_block_rsv, | 2171 | btrfs_init_block_rsv(&fs_info->delalloc_block_rsv, |
2172 | BTRFS_BLOCK_RSV_DELALLOC); | 2172 | BTRFS_BLOCK_RSV_DELALLOC); |
2173 | btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); | 2173 | btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); |
2174 | btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); | 2174 | btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); |
2175 | btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); | 2175 | btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); |
2176 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv, | 2176 | btrfs_init_block_rsv(&fs_info->delayed_block_rsv, |
2177 | BTRFS_BLOCK_RSV_DELOPS); | 2177 | BTRFS_BLOCK_RSV_DELOPS); |
2178 | atomic_set(&fs_info->nr_async_submits, 0); | 2178 | atomic_set(&fs_info->nr_async_submits, 0); |
2179 | atomic_set(&fs_info->async_delalloc_pages, 0); | 2179 | atomic_set(&fs_info->async_delalloc_pages, 0); |
2180 | atomic_set(&fs_info->async_submit_draining, 0); | 2180 | atomic_set(&fs_info->async_submit_draining, 0); |
2181 | atomic_set(&fs_info->nr_async_bios, 0); | 2181 | atomic_set(&fs_info->nr_async_bios, 0); |
2182 | atomic_set(&fs_info->defrag_running, 0); | 2182 | atomic_set(&fs_info->defrag_running, 0); |
2183 | atomic64_set(&fs_info->tree_mod_seq, 0); | 2183 | atomic64_set(&fs_info->tree_mod_seq, 0); |
2184 | fs_info->sb = sb; | 2184 | fs_info->sb = sb; |
2185 | fs_info->max_inline = 8192 * 1024; | 2185 | fs_info->max_inline = 8192 * 1024; |
2186 | fs_info->metadata_ratio = 0; | 2186 | fs_info->metadata_ratio = 0; |
2187 | fs_info->defrag_inodes = RB_ROOT; | 2187 | fs_info->defrag_inodes = RB_ROOT; |
2188 | fs_info->free_chunk_space = 0; | 2188 | fs_info->free_chunk_space = 0; |
2189 | fs_info->tree_mod_log = RB_ROOT; | 2189 | fs_info->tree_mod_log = RB_ROOT; |
2190 | fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; | 2190 | fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; |
2191 | fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64); | 2191 | fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64); |
2192 | /* readahead state */ | 2192 | /* readahead state */ |
2193 | INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); | 2193 | INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); |
2194 | spin_lock_init(&fs_info->reada_lock); | 2194 | spin_lock_init(&fs_info->reada_lock); |
2195 | 2195 | ||
2196 | fs_info->thread_pool_size = min_t(unsigned long, | 2196 | fs_info->thread_pool_size = min_t(unsigned long, |
2197 | num_online_cpus() + 2, 8); | 2197 | num_online_cpus() + 2, 8); |
2198 | 2198 | ||
2199 | INIT_LIST_HEAD(&fs_info->ordered_roots); | 2199 | INIT_LIST_HEAD(&fs_info->ordered_roots); |
2200 | spin_lock_init(&fs_info->ordered_root_lock); | 2200 | spin_lock_init(&fs_info->ordered_root_lock); |
2201 | fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), | 2201 | fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), |
2202 | GFP_NOFS); | 2202 | GFP_NOFS); |
2203 | if (!fs_info->delayed_root) { | 2203 | if (!fs_info->delayed_root) { |
2204 | err = -ENOMEM; | 2204 | err = -ENOMEM; |
2205 | goto fail_iput; | 2205 | goto fail_iput; |
2206 | } | 2206 | } |
2207 | btrfs_init_delayed_root(fs_info->delayed_root); | 2207 | btrfs_init_delayed_root(fs_info->delayed_root); |
2208 | 2208 | ||
2209 | mutex_init(&fs_info->scrub_lock); | 2209 | mutex_init(&fs_info->scrub_lock); |
2210 | atomic_set(&fs_info->scrubs_running, 0); | 2210 | atomic_set(&fs_info->scrubs_running, 0); |
2211 | atomic_set(&fs_info->scrub_pause_req, 0); | 2211 | atomic_set(&fs_info->scrub_pause_req, 0); |
2212 | atomic_set(&fs_info->scrubs_paused, 0); | 2212 | atomic_set(&fs_info->scrubs_paused, 0); |
2213 | atomic_set(&fs_info->scrub_cancel_req, 0); | 2213 | atomic_set(&fs_info->scrub_cancel_req, 0); |
2214 | init_waitqueue_head(&fs_info->scrub_pause_wait); | 2214 | init_waitqueue_head(&fs_info->scrub_pause_wait); |
2215 | fs_info->scrub_workers_refcnt = 0; | 2215 | fs_info->scrub_workers_refcnt = 0; |
2216 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 2216 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
2217 | fs_info->check_integrity_print_mask = 0; | 2217 | fs_info->check_integrity_print_mask = 0; |
2218 | #endif | 2218 | #endif |
2219 | 2219 | ||
2220 | spin_lock_init(&fs_info->balance_lock); | 2220 | spin_lock_init(&fs_info->balance_lock); |
2221 | mutex_init(&fs_info->balance_mutex); | 2221 | mutex_init(&fs_info->balance_mutex); |
2222 | atomic_set(&fs_info->balance_running, 0); | 2222 | atomic_set(&fs_info->balance_running, 0); |
2223 | atomic_set(&fs_info->balance_pause_req, 0); | 2223 | atomic_set(&fs_info->balance_pause_req, 0); |
2224 | atomic_set(&fs_info->balance_cancel_req, 0); | 2224 | atomic_set(&fs_info->balance_cancel_req, 0); |
2225 | fs_info->balance_ctl = NULL; | 2225 | fs_info->balance_ctl = NULL; |
2226 | init_waitqueue_head(&fs_info->balance_wait_q); | 2226 | init_waitqueue_head(&fs_info->balance_wait_q); |
2227 | 2227 | ||
2228 | sb->s_blocksize = 4096; | 2228 | sb->s_blocksize = 4096; |
2229 | sb->s_blocksize_bits = blksize_bits(4096); | 2229 | sb->s_blocksize_bits = blksize_bits(4096); |
2230 | sb->s_bdi = &fs_info->bdi; | 2230 | sb->s_bdi = &fs_info->bdi; |
2231 | 2231 | ||
2232 | fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; | 2232 | fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; |
2233 | set_nlink(fs_info->btree_inode, 1); | 2233 | set_nlink(fs_info->btree_inode, 1); |
2234 | /* | 2234 | /* |
2235 | * we set the i_size on the btree inode to the max possible int. | 2235 | * we set the i_size on the btree inode to the max possible int. |
2236 | * the real end of the address space is determined by all of | 2236 | * the real end of the address space is determined by all of |
2237 | * the devices in the system | 2237 | * the devices in the system |
2238 | */ | 2238 | */ |
2239 | fs_info->btree_inode->i_size = OFFSET_MAX; | 2239 | fs_info->btree_inode->i_size = OFFSET_MAX; |
2240 | fs_info->btree_inode->i_mapping->a_ops = &btree_aops; | 2240 | fs_info->btree_inode->i_mapping->a_ops = &btree_aops; |
2241 | fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi; | 2241 | fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi; |
2242 | 2242 | ||
2243 | RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); | 2243 | RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); |
2244 | extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, | 2244 | extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, |
2245 | fs_info->btree_inode->i_mapping); | 2245 | fs_info->btree_inode->i_mapping); |
2246 | BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0; | 2246 | BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0; |
2247 | extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); | 2247 | extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); |
2248 | 2248 | ||
2249 | BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; | 2249 | BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; |
2250 | 2250 | ||
2251 | BTRFS_I(fs_info->btree_inode)->root = tree_root; | 2251 | BTRFS_I(fs_info->btree_inode)->root = tree_root; |
2252 | memset(&BTRFS_I(fs_info->btree_inode)->location, 0, | 2252 | memset(&BTRFS_I(fs_info->btree_inode)->location, 0, |
2253 | sizeof(struct btrfs_key)); | 2253 | sizeof(struct btrfs_key)); |
2254 | set_bit(BTRFS_INODE_DUMMY, | 2254 | set_bit(BTRFS_INODE_DUMMY, |
2255 | &BTRFS_I(fs_info->btree_inode)->runtime_flags); | 2255 | &BTRFS_I(fs_info->btree_inode)->runtime_flags); |
2256 | btrfs_insert_inode_hash(fs_info->btree_inode); | 2256 | btrfs_insert_inode_hash(fs_info->btree_inode); |
2257 | 2257 | ||
2258 | spin_lock_init(&fs_info->block_group_cache_lock); | 2258 | spin_lock_init(&fs_info->block_group_cache_lock); |
2259 | fs_info->block_group_cache_tree = RB_ROOT; | 2259 | fs_info->block_group_cache_tree = RB_ROOT; |
2260 | fs_info->first_logical_byte = (u64)-1; | 2260 | fs_info->first_logical_byte = (u64)-1; |
2261 | 2261 | ||
2262 | extent_io_tree_init(&fs_info->freed_extents[0], | 2262 | extent_io_tree_init(&fs_info->freed_extents[0], |
2263 | fs_info->btree_inode->i_mapping); | 2263 | fs_info->btree_inode->i_mapping); |
2264 | extent_io_tree_init(&fs_info->freed_extents[1], | 2264 | extent_io_tree_init(&fs_info->freed_extents[1], |
2265 | fs_info->btree_inode->i_mapping); | 2265 | fs_info->btree_inode->i_mapping); |
2266 | fs_info->pinned_extents = &fs_info->freed_extents[0]; | 2266 | fs_info->pinned_extents = &fs_info->freed_extents[0]; |
2267 | fs_info->do_barriers = 1; | 2267 | fs_info->do_barriers = 1; |
2268 | 2268 | ||
2269 | 2269 | ||
2270 | mutex_init(&fs_info->ordered_operations_mutex); | 2270 | mutex_init(&fs_info->ordered_operations_mutex); |
2271 | mutex_init(&fs_info->ordered_extent_flush_mutex); | 2271 | mutex_init(&fs_info->ordered_extent_flush_mutex); |
2272 | mutex_init(&fs_info->tree_log_mutex); | 2272 | mutex_init(&fs_info->tree_log_mutex); |
2273 | mutex_init(&fs_info->chunk_mutex); | 2273 | mutex_init(&fs_info->chunk_mutex); |
2274 | mutex_init(&fs_info->transaction_kthread_mutex); | 2274 | mutex_init(&fs_info->transaction_kthread_mutex); |
2275 | mutex_init(&fs_info->cleaner_mutex); | 2275 | mutex_init(&fs_info->cleaner_mutex); |
2276 | mutex_init(&fs_info->volume_mutex); | 2276 | mutex_init(&fs_info->volume_mutex); |
2277 | init_rwsem(&fs_info->extent_commit_sem); | 2277 | init_rwsem(&fs_info->extent_commit_sem); |
2278 | init_rwsem(&fs_info->cleanup_work_sem); | 2278 | init_rwsem(&fs_info->cleanup_work_sem); |
2279 | init_rwsem(&fs_info->subvol_sem); | 2279 | init_rwsem(&fs_info->subvol_sem); |
2280 | sema_init(&fs_info->uuid_tree_rescan_sem, 1); | 2280 | sema_init(&fs_info->uuid_tree_rescan_sem, 1); |
2281 | fs_info->dev_replace.lock_owner = 0; | 2281 | fs_info->dev_replace.lock_owner = 0; |
2282 | atomic_set(&fs_info->dev_replace.nesting_level, 0); | 2282 | atomic_set(&fs_info->dev_replace.nesting_level, 0); |
2283 | mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); | 2283 | mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); |
2284 | mutex_init(&fs_info->dev_replace.lock_management_lock); | 2284 | mutex_init(&fs_info->dev_replace.lock_management_lock); |
2285 | mutex_init(&fs_info->dev_replace.lock); | 2285 | mutex_init(&fs_info->dev_replace.lock); |
2286 | 2286 | ||
2287 | spin_lock_init(&fs_info->qgroup_lock); | 2287 | spin_lock_init(&fs_info->qgroup_lock); |
2288 | mutex_init(&fs_info->qgroup_ioctl_lock); | 2288 | mutex_init(&fs_info->qgroup_ioctl_lock); |
2289 | fs_info->qgroup_tree = RB_ROOT; | 2289 | fs_info->qgroup_tree = RB_ROOT; |
2290 | INIT_LIST_HEAD(&fs_info->dirty_qgroups); | 2290 | INIT_LIST_HEAD(&fs_info->dirty_qgroups); |
2291 | fs_info->qgroup_seq = 1; | 2291 | fs_info->qgroup_seq = 1; |
2292 | fs_info->quota_enabled = 0; | 2292 | fs_info->quota_enabled = 0; |
2293 | fs_info->pending_quota_state = 0; | 2293 | fs_info->pending_quota_state = 0; |
2294 | fs_info->qgroup_ulist = NULL; | 2294 | fs_info->qgroup_ulist = NULL; |
2295 | mutex_init(&fs_info->qgroup_rescan_lock); | 2295 | mutex_init(&fs_info->qgroup_rescan_lock); |
2296 | 2296 | ||
2297 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); | 2297 | btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); |
2298 | btrfs_init_free_cluster(&fs_info->data_alloc_cluster); | 2298 | btrfs_init_free_cluster(&fs_info->data_alloc_cluster); |
2299 | 2299 | ||
2300 | init_waitqueue_head(&fs_info->transaction_throttle); | 2300 | init_waitqueue_head(&fs_info->transaction_throttle); |
2301 | init_waitqueue_head(&fs_info->transaction_wait); | 2301 | init_waitqueue_head(&fs_info->transaction_wait); |
2302 | init_waitqueue_head(&fs_info->transaction_blocked_wait); | 2302 | init_waitqueue_head(&fs_info->transaction_blocked_wait); |
2303 | init_waitqueue_head(&fs_info->async_submit_wait); | 2303 | init_waitqueue_head(&fs_info->async_submit_wait); |
2304 | 2304 | ||
2305 | ret = btrfs_alloc_stripe_hash_table(fs_info); | 2305 | ret = btrfs_alloc_stripe_hash_table(fs_info); |
2306 | if (ret) { | 2306 | if (ret) { |
2307 | err = ret; | 2307 | err = ret; |
2308 | goto fail_alloc; | 2308 | goto fail_alloc; |
2309 | } | 2309 | } |
2310 | 2310 | ||
2311 | __setup_root(4096, 4096, 4096, 4096, tree_root, | 2311 | __setup_root(4096, 4096, 4096, 4096, tree_root, |
2312 | fs_info, BTRFS_ROOT_TREE_OBJECTID); | 2312 | fs_info, BTRFS_ROOT_TREE_OBJECTID); |
2313 | 2313 | ||
2314 | invalidate_bdev(fs_devices->latest_bdev); | 2314 | invalidate_bdev(fs_devices->latest_bdev); |
2315 | 2315 | ||
2316 | /* | 2316 | /* |
2317 | * Read super block and check the signature bytes only | 2317 | * Read super block and check the signature bytes only |
2318 | */ | 2318 | */ |
2319 | bh = btrfs_read_dev_super(fs_devices->latest_bdev); | 2319 | bh = btrfs_read_dev_super(fs_devices->latest_bdev); |
2320 | if (!bh) { | 2320 | if (!bh) { |
2321 | err = -EINVAL; | 2321 | err = -EINVAL; |
2322 | goto fail_alloc; | 2322 | goto fail_alloc; |
2323 | } | 2323 | } |
2324 | 2324 | ||
2325 | /* | 2325 | /* |
2326 | * We want to check superblock checksum, the type is stored inside. | 2326 | * We want to check superblock checksum, the type is stored inside. |
2327 | * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). | 2327 | * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). |
2328 | */ | 2328 | */ |
2329 | if (btrfs_check_super_csum(bh->b_data)) { | 2329 | if (btrfs_check_super_csum(bh->b_data)) { |
2330 | printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); | 2330 | printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); |
2331 | err = -EINVAL; | 2331 | err = -EINVAL; |
2332 | goto fail_alloc; | 2332 | goto fail_alloc; |
2333 | } | 2333 | } |
2334 | 2334 | ||
2335 | /* | 2335 | /* |
2336 | * super_copy is zeroed at allocation time and we never touch the | 2336 | * super_copy is zeroed at allocation time and we never touch the |
2337 | * following bytes up to INFO_SIZE, the checksum is calculated from | 2337 | * following bytes up to INFO_SIZE, the checksum is calculated from |
2338 | * the whole block of INFO_SIZE | 2338 | * the whole block of INFO_SIZE |
2339 | */ | 2339 | */ |
2340 | memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); | 2340 | memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); |
2341 | memcpy(fs_info->super_for_commit, fs_info->super_copy, | 2341 | memcpy(fs_info->super_for_commit, fs_info->super_copy, |
2342 | sizeof(*fs_info->super_for_commit)); | 2342 | sizeof(*fs_info->super_for_commit)); |
2343 | brelse(bh); | 2343 | brelse(bh); |
2344 | 2344 | ||
2345 | memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); | 2345 | memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); |
2346 | 2346 | ||
2347 | ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); | 2347 | ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); |
2348 | if (ret) { | 2348 | if (ret) { |
2349 | printk(KERN_ERR "BTRFS: superblock contains fatal errors\n"); | 2349 | printk(KERN_ERR "BTRFS: superblock contains fatal errors\n"); |
2350 | err = -EINVAL; | 2350 | err = -EINVAL; |
2351 | goto fail_alloc; | 2351 | goto fail_alloc; |
2352 | } | 2352 | } |
2353 | 2353 | ||
2354 | disk_super = fs_info->super_copy; | 2354 | disk_super = fs_info->super_copy; |
2355 | if (!btrfs_super_root(disk_super)) | 2355 | if (!btrfs_super_root(disk_super)) |
2356 | goto fail_alloc; | 2356 | goto fail_alloc; |
2357 | 2357 | ||
2358 | /* check FS state, whether FS is broken. */ | 2358 | /* check FS state, whether FS is broken. */ |
2359 | if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) | 2359 | if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) |
2360 | set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); | 2360 | set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); |
2361 | 2361 | ||
2362 | /* | 2362 | /* |
2363 | * run through our array of backup supers and setup | 2363 | * run through our array of backup supers and setup |
2364 | * our ring pointer to the oldest one | 2364 | * our ring pointer to the oldest one |
2365 | */ | 2365 | */ |
2366 | generation = btrfs_super_generation(disk_super); | 2366 | generation = btrfs_super_generation(disk_super); |
2367 | find_oldest_super_backup(fs_info, generation); | 2367 | find_oldest_super_backup(fs_info, generation); |
2368 | 2368 | ||
2369 | /* | 2369 | /* |
2370 | * In the long term, we'll store the compression type in the super | 2370 | * In the long term, we'll store the compression type in the super |
2371 | * block, and it'll be used for per file compression control. | 2371 | * block, and it'll be used for per file compression control. |
2372 | */ | 2372 | */ |
2373 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; | 2373 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; |
2374 | 2374 | ||
2375 | ret = btrfs_parse_options(tree_root, options); | 2375 | ret = btrfs_parse_options(tree_root, options); |
2376 | if (ret) { | 2376 | if (ret) { |
2377 | err = ret; | 2377 | err = ret; |
2378 | goto fail_alloc; | 2378 | goto fail_alloc; |
2379 | } | 2379 | } |
2380 | 2380 | ||
2381 | features = btrfs_super_incompat_flags(disk_super) & | 2381 | features = btrfs_super_incompat_flags(disk_super) & |
2382 | ~BTRFS_FEATURE_INCOMPAT_SUPP; | 2382 | ~BTRFS_FEATURE_INCOMPAT_SUPP; |
2383 | if (features) { | 2383 | if (features) { |
2384 | printk(KERN_ERR "BTRFS: couldn't mount because of " | 2384 | printk(KERN_ERR "BTRFS: couldn't mount because of " |
2385 | "unsupported optional features (%Lx).\n", | 2385 | "unsupported optional features (%Lx).\n", |
2386 | features); | 2386 | features); |
2387 | err = -EINVAL; | 2387 | err = -EINVAL; |
2388 | goto fail_alloc; | 2388 | goto fail_alloc; |
2389 | } | 2389 | } |
2390 | 2390 | ||
2391 | if (btrfs_super_leafsize(disk_super) != | 2391 | if (btrfs_super_leafsize(disk_super) != |
2392 | btrfs_super_nodesize(disk_super)) { | 2392 | btrfs_super_nodesize(disk_super)) { |
2393 | printk(KERN_ERR "BTRFS: couldn't mount because metadata " | 2393 | printk(KERN_ERR "BTRFS: couldn't mount because metadata " |
2394 | "blocksizes don't match. node %d leaf %d\n", | 2394 | "blocksizes don't match. node %d leaf %d\n", |
2395 | btrfs_super_nodesize(disk_super), | 2395 | btrfs_super_nodesize(disk_super), |
2396 | btrfs_super_leafsize(disk_super)); | 2396 | btrfs_super_leafsize(disk_super)); |
2397 | err = -EINVAL; | 2397 | err = -EINVAL; |
2398 | goto fail_alloc; | 2398 | goto fail_alloc; |
2399 | } | 2399 | } |
2400 | if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) { | 2400 | if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) { |
2401 | printk(KERN_ERR "BTRFS: couldn't mount because metadata " | 2401 | printk(KERN_ERR "BTRFS: couldn't mount because metadata " |
2402 | "blocksize (%d) was too large\n", | 2402 | "blocksize (%d) was too large\n", |
2403 | btrfs_super_leafsize(disk_super)); | 2403 | btrfs_super_leafsize(disk_super)); |
2404 | err = -EINVAL; | 2404 | err = -EINVAL; |
2405 | goto fail_alloc; | 2405 | goto fail_alloc; |
2406 | } | 2406 | } |
2407 | 2407 | ||
2408 | features = btrfs_super_incompat_flags(disk_super); | 2408 | features = btrfs_super_incompat_flags(disk_super); |
2409 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; | 2409 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
2410 | if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) | 2410 | if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) |
2411 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; | 2411 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
2412 | 2412 | ||
2413 | if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) | 2413 | if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) |
2414 | printk(KERN_ERR "BTRFS: has skinny extents\n"); | 2414 | printk(KERN_INFO "BTRFS: has skinny extents\n"); |
2415 | 2415 | ||
2416 | /* | 2416 | /* |
2417 | * flag our filesystem as having big metadata blocks if | 2417 | * flag our filesystem as having big metadata blocks if |
2418 | * they are bigger than the page size | 2418 | * they are bigger than the page size |
2419 | */ | 2419 | */ |
2420 | if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) { | 2420 | if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) { |
2421 | if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) | 2421 | if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) |
2422 | printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); | 2422 | printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); |
2423 | features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; | 2423 | features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; |
2424 | } | 2424 | } |
2425 | 2425 | ||
2426 | nodesize = btrfs_super_nodesize(disk_super); | 2426 | nodesize = btrfs_super_nodesize(disk_super); |
2427 | leafsize = btrfs_super_leafsize(disk_super); | 2427 | leafsize = btrfs_super_leafsize(disk_super); |
2428 | sectorsize = btrfs_super_sectorsize(disk_super); | 2428 | sectorsize = btrfs_super_sectorsize(disk_super); |
2429 | stripesize = btrfs_super_stripesize(disk_super); | 2429 | stripesize = btrfs_super_stripesize(disk_super); |
2430 | fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids)); | 2430 | fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids)); |
2431 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); | 2431 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
2432 | 2432 | ||
2433 | /* | 2433 | /* |
2434 | * mixed block groups end up with duplicate but slightly offset | 2434 | * mixed block groups end up with duplicate but slightly offset |
2435 | * extent buffers for the same range. It leads to corruptions | 2435 | * extent buffers for the same range. It leads to corruptions |
2436 | */ | 2436 | */ |
2437 | if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && | 2437 | if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && |
2438 | (sectorsize != leafsize)) { | 2438 | (sectorsize != leafsize)) { |
2439 | printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes " | 2439 | printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes " |
2440 | "are not allowed for mixed block groups on %s\n", | 2440 | "are not allowed for mixed block groups on %s\n", |
2441 | sb->s_id); | 2441 | sb->s_id); |
2442 | goto fail_alloc; | 2442 | goto fail_alloc; |
2443 | } | 2443 | } |
2444 | 2444 | ||
2445 | /* | 2445 | /* |
2446 | * Needn't use the lock because there is no other task which will | 2446 | * Needn't use the lock because there is no other task which will |
2447 | * update the flag. | 2447 | * update the flag. |
2448 | */ | 2448 | */ |
2449 | btrfs_set_super_incompat_flags(disk_super, features); | 2449 | btrfs_set_super_incompat_flags(disk_super, features); |
2450 | 2450 | ||
2451 | features = btrfs_super_compat_ro_flags(disk_super) & | 2451 | features = btrfs_super_compat_ro_flags(disk_super) & |
2452 | ~BTRFS_FEATURE_COMPAT_RO_SUPP; | 2452 | ~BTRFS_FEATURE_COMPAT_RO_SUPP; |
2453 | if (!(sb->s_flags & MS_RDONLY) && features) { | 2453 | if (!(sb->s_flags & MS_RDONLY) && features) { |
2454 | printk(KERN_ERR "BTRFS: couldn't mount RDWR because of " | 2454 | printk(KERN_ERR "BTRFS: couldn't mount RDWR because of " |
2455 | "unsupported option features (%Lx).\n", | 2455 | "unsupported option features (%Lx).\n", |
2456 | features); | 2456 | features); |
2457 | err = -EINVAL; | 2457 | err = -EINVAL; |
2458 | goto fail_alloc; | 2458 | goto fail_alloc; |
2459 | } | 2459 | } |
2460 | 2460 | ||
2461 | btrfs_init_workers(&fs_info->generic_worker, | 2461 | btrfs_init_workers(&fs_info->generic_worker, |
2462 | "genwork", 1, NULL); | 2462 | "genwork", 1, NULL); |
2463 | 2463 | ||
2464 | btrfs_init_workers(&fs_info->workers, "worker", | 2464 | btrfs_init_workers(&fs_info->workers, "worker", |
2465 | fs_info->thread_pool_size, | 2465 | fs_info->thread_pool_size, |
2466 | &fs_info->generic_worker); | 2466 | &fs_info->generic_worker); |
2467 | 2467 | ||
2468 | btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", | 2468 | btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", |
2469 | fs_info->thread_pool_size, NULL); | 2469 | fs_info->thread_pool_size, NULL); |
2470 | 2470 | ||
2471 | btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc", | 2471 | btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc", |
2472 | fs_info->thread_pool_size, NULL); | 2472 | fs_info->thread_pool_size, NULL); |
2473 | 2473 | ||
2474 | btrfs_init_workers(&fs_info->submit_workers, "submit", | 2474 | btrfs_init_workers(&fs_info->submit_workers, "submit", |
2475 | min_t(u64, fs_devices->num_devices, | 2475 | min_t(u64, fs_devices->num_devices, |
2476 | fs_info->thread_pool_size), NULL); | 2476 | fs_info->thread_pool_size), NULL); |
2477 | 2477 | ||
2478 | btrfs_init_workers(&fs_info->caching_workers, "cache", | 2478 | btrfs_init_workers(&fs_info->caching_workers, "cache", |
2479 | fs_info->thread_pool_size, NULL); | 2479 | fs_info->thread_pool_size, NULL); |
2480 | 2480 | ||
2481 | /* a higher idle thresh on the submit workers makes it much more | 2481 | /* a higher idle thresh on the submit workers makes it much more |
2482 | * likely that bios will be send down in a sane order to the | 2482 | * likely that bios will be send down in a sane order to the |
2483 | * devices | 2483 | * devices |
2484 | */ | 2484 | */ |
2485 | fs_info->submit_workers.idle_thresh = 64; | 2485 | fs_info->submit_workers.idle_thresh = 64; |
2486 | 2486 | ||
2487 | fs_info->workers.idle_thresh = 16; | 2487 | fs_info->workers.idle_thresh = 16; |
2488 | fs_info->workers.ordered = 1; | 2488 | fs_info->workers.ordered = 1; |
2489 | 2489 | ||
2490 | fs_info->delalloc_workers.idle_thresh = 2; | 2490 | fs_info->delalloc_workers.idle_thresh = 2; |
2491 | fs_info->delalloc_workers.ordered = 1; | 2491 | fs_info->delalloc_workers.ordered = 1; |
2492 | 2492 | ||
2493 | btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1, | 2493 | btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1, |
2494 | &fs_info->generic_worker); | 2494 | &fs_info->generic_worker); |
2495 | btrfs_init_workers(&fs_info->endio_workers, "endio", | 2495 | btrfs_init_workers(&fs_info->endio_workers, "endio", |
2496 | fs_info->thread_pool_size, | 2496 | fs_info->thread_pool_size, |
2497 | &fs_info->generic_worker); | 2497 | &fs_info->generic_worker); |
2498 | btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", | 2498 | btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", |
2499 | fs_info->thread_pool_size, | 2499 | fs_info->thread_pool_size, |
2500 | &fs_info->generic_worker); | 2500 | &fs_info->generic_worker); |
2501 | btrfs_init_workers(&fs_info->endio_meta_write_workers, | 2501 | btrfs_init_workers(&fs_info->endio_meta_write_workers, |
2502 | "endio-meta-write", fs_info->thread_pool_size, | 2502 | "endio-meta-write", fs_info->thread_pool_size, |
2503 | &fs_info->generic_worker); | 2503 | &fs_info->generic_worker); |
2504 | btrfs_init_workers(&fs_info->endio_raid56_workers, | 2504 | btrfs_init_workers(&fs_info->endio_raid56_workers, |
2505 | "endio-raid56", fs_info->thread_pool_size, | 2505 | "endio-raid56", fs_info->thread_pool_size, |
2506 | &fs_info->generic_worker); | 2506 | &fs_info->generic_worker); |
2507 | btrfs_init_workers(&fs_info->rmw_workers, | 2507 | btrfs_init_workers(&fs_info->rmw_workers, |
2508 | "rmw", fs_info->thread_pool_size, | 2508 | "rmw", fs_info->thread_pool_size, |
2509 | &fs_info->generic_worker); | 2509 | &fs_info->generic_worker); |
2510 | btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", | 2510 | btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", |
2511 | fs_info->thread_pool_size, | 2511 | fs_info->thread_pool_size, |
2512 | &fs_info->generic_worker); | 2512 | &fs_info->generic_worker); |
2513 | btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", | 2513 | btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", |
2514 | 1, &fs_info->generic_worker); | 2514 | 1, &fs_info->generic_worker); |
2515 | btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", | 2515 | btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", |
2516 | fs_info->thread_pool_size, | 2516 | fs_info->thread_pool_size, |
2517 | &fs_info->generic_worker); | 2517 | &fs_info->generic_worker); |
2518 | btrfs_init_workers(&fs_info->readahead_workers, "readahead", | 2518 | btrfs_init_workers(&fs_info->readahead_workers, "readahead", |
2519 | fs_info->thread_pool_size, | 2519 | fs_info->thread_pool_size, |
2520 | &fs_info->generic_worker); | 2520 | &fs_info->generic_worker); |
2521 | btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1, | 2521 | btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1, |
2522 | &fs_info->generic_worker); | 2522 | &fs_info->generic_worker); |
2523 | 2523 | ||
2524 | /* | 2524 | /* |
2525 | * endios are largely parallel and should have a very | 2525 | * endios are largely parallel and should have a very |
2526 | * low idle thresh | 2526 | * low idle thresh |
2527 | */ | 2527 | */ |
2528 | fs_info->endio_workers.idle_thresh = 4; | 2528 | fs_info->endio_workers.idle_thresh = 4; |
2529 | fs_info->endio_meta_workers.idle_thresh = 4; | 2529 | fs_info->endio_meta_workers.idle_thresh = 4; |
2530 | fs_info->endio_raid56_workers.idle_thresh = 4; | 2530 | fs_info->endio_raid56_workers.idle_thresh = 4; |
2531 | fs_info->rmw_workers.idle_thresh = 2; | 2531 | fs_info->rmw_workers.idle_thresh = 2; |
2532 | 2532 | ||
2533 | fs_info->endio_write_workers.idle_thresh = 2; | 2533 | fs_info->endio_write_workers.idle_thresh = 2; |
2534 | fs_info->endio_meta_write_workers.idle_thresh = 2; | 2534 | fs_info->endio_meta_write_workers.idle_thresh = 2; |
2535 | fs_info->readahead_workers.idle_thresh = 2; | 2535 | fs_info->readahead_workers.idle_thresh = 2; |
2536 | 2536 | ||
2537 | /* | 2537 | /* |
2538 | * btrfs_start_workers can really only fail because of ENOMEM so just | 2538 | * btrfs_start_workers can really only fail because of ENOMEM so just |
2539 | * return -ENOMEM if any of these fail. | 2539 | * return -ENOMEM if any of these fail. |
2540 | */ | 2540 | */ |
2541 | ret = btrfs_start_workers(&fs_info->workers); | 2541 | ret = btrfs_start_workers(&fs_info->workers); |
2542 | ret |= btrfs_start_workers(&fs_info->generic_worker); | 2542 | ret |= btrfs_start_workers(&fs_info->generic_worker); |
2543 | ret |= btrfs_start_workers(&fs_info->submit_workers); | 2543 | ret |= btrfs_start_workers(&fs_info->submit_workers); |
2544 | ret |= btrfs_start_workers(&fs_info->delalloc_workers); | 2544 | ret |= btrfs_start_workers(&fs_info->delalloc_workers); |
2545 | ret |= btrfs_start_workers(&fs_info->fixup_workers); | 2545 | ret |= btrfs_start_workers(&fs_info->fixup_workers); |
2546 | ret |= btrfs_start_workers(&fs_info->endio_workers); | 2546 | ret |= btrfs_start_workers(&fs_info->endio_workers); |
2547 | ret |= btrfs_start_workers(&fs_info->endio_meta_workers); | 2547 | ret |= btrfs_start_workers(&fs_info->endio_meta_workers); |
2548 | ret |= btrfs_start_workers(&fs_info->rmw_workers); | 2548 | ret |= btrfs_start_workers(&fs_info->rmw_workers); |
2549 | ret |= btrfs_start_workers(&fs_info->endio_raid56_workers); | 2549 | ret |= btrfs_start_workers(&fs_info->endio_raid56_workers); |
2550 | ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers); | 2550 | ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers); |
2551 | ret |= btrfs_start_workers(&fs_info->endio_write_workers); | 2551 | ret |= btrfs_start_workers(&fs_info->endio_write_workers); |
2552 | ret |= btrfs_start_workers(&fs_info->endio_freespace_worker); | 2552 | ret |= btrfs_start_workers(&fs_info->endio_freespace_worker); |
2553 | ret |= btrfs_start_workers(&fs_info->delayed_workers); | 2553 | ret |= btrfs_start_workers(&fs_info->delayed_workers); |
2554 | ret |= btrfs_start_workers(&fs_info->caching_workers); | 2554 | ret |= btrfs_start_workers(&fs_info->caching_workers); |
2555 | ret |= btrfs_start_workers(&fs_info->readahead_workers); | 2555 | ret |= btrfs_start_workers(&fs_info->readahead_workers); |
2556 | ret |= btrfs_start_workers(&fs_info->flush_workers); | 2556 | ret |= btrfs_start_workers(&fs_info->flush_workers); |
2557 | ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers); | 2557 | ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers); |
2558 | if (ret) { | 2558 | if (ret) { |
2559 | err = -ENOMEM; | 2559 | err = -ENOMEM; |
2560 | goto fail_sb_buffer; | 2560 | goto fail_sb_buffer; |
2561 | } | 2561 | } |
2562 | 2562 | ||
2563 | fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); | 2563 | fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); |
2564 | fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, | 2564 | fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, |
2565 | 4 * 1024 * 1024 / PAGE_CACHE_SIZE); | 2565 | 4 * 1024 * 1024 / PAGE_CACHE_SIZE); |
2566 | 2566 | ||
2567 | tree_root->nodesize = nodesize; | 2567 | tree_root->nodesize = nodesize; |
2568 | tree_root->leafsize = leafsize; | 2568 | tree_root->leafsize = leafsize; |
2569 | tree_root->sectorsize = sectorsize; | 2569 | tree_root->sectorsize = sectorsize; |
2570 | tree_root->stripesize = stripesize; | 2570 | tree_root->stripesize = stripesize; |
2571 | 2571 | ||
2572 | sb->s_blocksize = sectorsize; | 2572 | sb->s_blocksize = sectorsize; |
2573 | sb->s_blocksize_bits = blksize_bits(sectorsize); | 2573 | sb->s_blocksize_bits = blksize_bits(sectorsize); |
2574 | 2574 | ||
2575 | if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) { | 2575 | if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) { |
2576 | printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id); | 2576 | printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id); |
2577 | goto fail_sb_buffer; | 2577 | goto fail_sb_buffer; |
2578 | } | 2578 | } |
2579 | 2579 | ||
2580 | if (sectorsize != PAGE_SIZE) { | 2580 | if (sectorsize != PAGE_SIZE) { |
2581 | printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) " | 2581 | printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) " |
2582 | "found on %s\n", (unsigned long)sectorsize, sb->s_id); | 2582 | "found on %s\n", (unsigned long)sectorsize, sb->s_id); |
2583 | goto fail_sb_buffer; | 2583 | goto fail_sb_buffer; |
2584 | } | 2584 | } |
2585 | 2585 | ||
2586 | mutex_lock(&fs_info->chunk_mutex); | 2586 | mutex_lock(&fs_info->chunk_mutex); |
2587 | ret = btrfs_read_sys_array(tree_root); | 2587 | ret = btrfs_read_sys_array(tree_root); |
2588 | mutex_unlock(&fs_info->chunk_mutex); | 2588 | mutex_unlock(&fs_info->chunk_mutex); |
2589 | if (ret) { | 2589 | if (ret) { |
2590 | printk(KERN_WARNING "BTRFS: failed to read the system " | 2590 | printk(KERN_WARNING "BTRFS: failed to read the system " |
2591 | "array on %s\n", sb->s_id); | 2591 | "array on %s\n", sb->s_id); |
2592 | goto fail_sb_buffer; | 2592 | goto fail_sb_buffer; |
2593 | } | 2593 | } |
2594 | 2594 | ||
2595 | blocksize = btrfs_level_size(tree_root, | 2595 | blocksize = btrfs_level_size(tree_root, |
2596 | btrfs_super_chunk_root_level(disk_super)); | 2596 | btrfs_super_chunk_root_level(disk_super)); |
2597 | generation = btrfs_super_chunk_root_generation(disk_super); | 2597 | generation = btrfs_super_chunk_root_generation(disk_super); |
2598 | 2598 | ||
2599 | __setup_root(nodesize, leafsize, sectorsize, stripesize, | 2599 | __setup_root(nodesize, leafsize, sectorsize, stripesize, |
2600 | chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); | 2600 | chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); |
2601 | 2601 | ||
2602 | chunk_root->node = read_tree_block(chunk_root, | 2602 | chunk_root->node = read_tree_block(chunk_root, |
2603 | btrfs_super_chunk_root(disk_super), | 2603 | btrfs_super_chunk_root(disk_super), |
2604 | blocksize, generation); | 2604 | blocksize, generation); |
2605 | if (!chunk_root->node || | 2605 | if (!chunk_root->node || |
2606 | !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { | 2606 | !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { |
2607 | printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n", | 2607 | printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n", |
2608 | sb->s_id); | 2608 | sb->s_id); |
2609 | goto fail_tree_roots; | 2609 | goto fail_tree_roots; |
2610 | } | 2610 | } |
2611 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); | 2611 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); |
2612 | chunk_root->commit_root = btrfs_root_node(chunk_root); | 2612 | chunk_root->commit_root = btrfs_root_node(chunk_root); |
2613 | 2613 | ||
2614 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, | 2614 | read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, |
2615 | btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); | 2615 | btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); |
2616 | 2616 | ||
2617 | ret = btrfs_read_chunk_tree(chunk_root); | 2617 | ret = btrfs_read_chunk_tree(chunk_root); |
2618 | if (ret) { | 2618 | if (ret) { |
2619 | printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n", | 2619 | printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n", |
2620 | sb->s_id); | 2620 | sb->s_id); |
2621 | goto fail_tree_roots; | 2621 | goto fail_tree_roots; |
2622 | } | 2622 | } |
2623 | 2623 | ||
2624 | /* | 2624 | /* |
2625 | * keep the device that is marked to be the target device for the | 2625 | * keep the device that is marked to be the target device for the |
2626 | * dev_replace procedure | 2626 | * dev_replace procedure |
2627 | */ | 2627 | */ |
2628 | btrfs_close_extra_devices(fs_info, fs_devices, 0); | 2628 | btrfs_close_extra_devices(fs_info, fs_devices, 0); |
2629 | 2629 | ||
2630 | if (!fs_devices->latest_bdev) { | 2630 | if (!fs_devices->latest_bdev) { |
2631 | printk(KERN_CRIT "BTRFS: failed to read devices on %s\n", | 2631 | printk(KERN_CRIT "BTRFS: failed to read devices on %s\n", |
2632 | sb->s_id); | 2632 | sb->s_id); |
2633 | goto fail_tree_roots; | 2633 | goto fail_tree_roots; |
2634 | } | 2634 | } |
2635 | 2635 | ||
2636 | retry_root_backup: | 2636 | retry_root_backup: |
2637 | blocksize = btrfs_level_size(tree_root, | 2637 | blocksize = btrfs_level_size(tree_root, |
2638 | btrfs_super_root_level(disk_super)); | 2638 | btrfs_super_root_level(disk_super)); |
2639 | generation = btrfs_super_generation(disk_super); | 2639 | generation = btrfs_super_generation(disk_super); |
2640 | 2640 | ||
2641 | tree_root->node = read_tree_block(tree_root, | 2641 | tree_root->node = read_tree_block(tree_root, |
2642 | btrfs_super_root(disk_super), | 2642 | btrfs_super_root(disk_super), |
2643 | blocksize, generation); | 2643 | blocksize, generation); |
2644 | if (!tree_root->node || | 2644 | if (!tree_root->node || |
2645 | !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { | 2645 | !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { |
2646 | printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", | 2646 | printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", |
2647 | sb->s_id); | 2647 | sb->s_id); |
2648 | 2648 | ||
2649 | goto recovery_tree_root; | 2649 | goto recovery_tree_root; |
2650 | } | 2650 | } |
2651 | 2651 | ||
2652 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); | 2652 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); |
2653 | tree_root->commit_root = btrfs_root_node(tree_root); | 2653 | tree_root->commit_root = btrfs_root_node(tree_root); |
2654 | btrfs_set_root_refs(&tree_root->root_item, 1); | 2654 | btrfs_set_root_refs(&tree_root->root_item, 1); |
2655 | 2655 | ||
2656 | location.objectid = BTRFS_EXTENT_TREE_OBJECTID; | 2656 | location.objectid = BTRFS_EXTENT_TREE_OBJECTID; |
2657 | location.type = BTRFS_ROOT_ITEM_KEY; | 2657 | location.type = BTRFS_ROOT_ITEM_KEY; |
2658 | location.offset = 0; | 2658 | location.offset = 0; |
2659 | 2659 | ||
2660 | extent_root = btrfs_read_tree_root(tree_root, &location); | 2660 | extent_root = btrfs_read_tree_root(tree_root, &location); |
2661 | if (IS_ERR(extent_root)) { | 2661 | if (IS_ERR(extent_root)) { |
2662 | ret = PTR_ERR(extent_root); | 2662 | ret = PTR_ERR(extent_root); |
2663 | goto recovery_tree_root; | 2663 | goto recovery_tree_root; |
2664 | } | 2664 | } |
2665 | extent_root->track_dirty = 1; | 2665 | extent_root->track_dirty = 1; |
2666 | fs_info->extent_root = extent_root; | 2666 | fs_info->extent_root = extent_root; |
2667 | 2667 | ||
2668 | location.objectid = BTRFS_DEV_TREE_OBJECTID; | 2668 | location.objectid = BTRFS_DEV_TREE_OBJECTID; |
2669 | dev_root = btrfs_read_tree_root(tree_root, &location); | 2669 | dev_root = btrfs_read_tree_root(tree_root, &location); |
2670 | if (IS_ERR(dev_root)) { | 2670 | if (IS_ERR(dev_root)) { |
2671 | ret = PTR_ERR(dev_root); | 2671 | ret = PTR_ERR(dev_root); |
2672 | goto recovery_tree_root; | 2672 | goto recovery_tree_root; |
2673 | } | 2673 | } |
2674 | dev_root->track_dirty = 1; | 2674 | dev_root->track_dirty = 1; |
2675 | fs_info->dev_root = dev_root; | 2675 | fs_info->dev_root = dev_root; |
2676 | btrfs_init_devices_late(fs_info); | 2676 | btrfs_init_devices_late(fs_info); |
2677 | 2677 | ||
2678 | location.objectid = BTRFS_CSUM_TREE_OBJECTID; | 2678 | location.objectid = BTRFS_CSUM_TREE_OBJECTID; |
2679 | csum_root = btrfs_read_tree_root(tree_root, &location); | 2679 | csum_root = btrfs_read_tree_root(tree_root, &location); |
2680 | if (IS_ERR(csum_root)) { | 2680 | if (IS_ERR(csum_root)) { |
2681 | ret = PTR_ERR(csum_root); | 2681 | ret = PTR_ERR(csum_root); |
2682 | goto recovery_tree_root; | 2682 | goto recovery_tree_root; |
2683 | } | 2683 | } |
2684 | csum_root->track_dirty = 1; | 2684 | csum_root->track_dirty = 1; |
2685 | fs_info->csum_root = csum_root; | 2685 | fs_info->csum_root = csum_root; |
2686 | 2686 | ||
2687 | location.objectid = BTRFS_QUOTA_TREE_OBJECTID; | 2687 | location.objectid = BTRFS_QUOTA_TREE_OBJECTID; |
2688 | quota_root = btrfs_read_tree_root(tree_root, &location); | 2688 | quota_root = btrfs_read_tree_root(tree_root, &location); |
2689 | if (!IS_ERR(quota_root)) { | 2689 | if (!IS_ERR(quota_root)) { |
2690 | quota_root->track_dirty = 1; | 2690 | quota_root->track_dirty = 1; |
2691 | fs_info->quota_enabled = 1; | 2691 | fs_info->quota_enabled = 1; |
2692 | fs_info->pending_quota_state = 1; | 2692 | fs_info->pending_quota_state = 1; |
2693 | fs_info->quota_root = quota_root; | 2693 | fs_info->quota_root = quota_root; |
2694 | } | 2694 | } |
2695 | 2695 | ||
2696 | location.objectid = BTRFS_UUID_TREE_OBJECTID; | 2696 | location.objectid = BTRFS_UUID_TREE_OBJECTID; |
2697 | uuid_root = btrfs_read_tree_root(tree_root, &location); | 2697 | uuid_root = btrfs_read_tree_root(tree_root, &location); |
2698 | if (IS_ERR(uuid_root)) { | 2698 | if (IS_ERR(uuid_root)) { |
2699 | ret = PTR_ERR(uuid_root); | 2699 | ret = PTR_ERR(uuid_root); |
2700 | if (ret != -ENOENT) | 2700 | if (ret != -ENOENT) |
2701 | goto recovery_tree_root; | 2701 | goto recovery_tree_root; |
2702 | create_uuid_tree = true; | 2702 | create_uuid_tree = true; |
2703 | check_uuid_tree = false; | 2703 | check_uuid_tree = false; |
2704 | } else { | 2704 | } else { |
2705 | uuid_root->track_dirty = 1; | 2705 | uuid_root->track_dirty = 1; |
2706 | fs_info->uuid_root = uuid_root; | 2706 | fs_info->uuid_root = uuid_root; |
2707 | create_uuid_tree = false; | 2707 | create_uuid_tree = false; |
2708 | check_uuid_tree = | 2708 | check_uuid_tree = |
2709 | generation != btrfs_super_uuid_tree_generation(disk_super); | 2709 | generation != btrfs_super_uuid_tree_generation(disk_super); |
2710 | } | 2710 | } |
2711 | 2711 | ||
2712 | fs_info->generation = generation; | 2712 | fs_info->generation = generation; |
2713 | fs_info->last_trans_committed = generation; | 2713 | fs_info->last_trans_committed = generation; |
2714 | 2714 | ||
2715 | ret = btrfs_recover_balance(fs_info); | 2715 | ret = btrfs_recover_balance(fs_info); |
2716 | if (ret) { | 2716 | if (ret) { |
2717 | printk(KERN_WARNING "BTRFS: failed to recover balance\n"); | 2717 | printk(KERN_WARNING "BTRFS: failed to recover balance\n"); |
2718 | goto fail_block_groups; | 2718 | goto fail_block_groups; |
2719 | } | 2719 | } |
2720 | 2720 | ||
2721 | ret = btrfs_init_dev_stats(fs_info); | 2721 | ret = btrfs_init_dev_stats(fs_info); |
2722 | if (ret) { | 2722 | if (ret) { |
2723 | printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n", | 2723 | printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n", |
2724 | ret); | 2724 | ret); |
2725 | goto fail_block_groups; | 2725 | goto fail_block_groups; |
2726 | } | 2726 | } |
2727 | 2727 | ||
2728 | ret = btrfs_init_dev_replace(fs_info); | 2728 | ret = btrfs_init_dev_replace(fs_info); |
2729 | if (ret) { | 2729 | if (ret) { |
2730 | pr_err("BTRFS: failed to init dev_replace: %d\n", ret); | 2730 | pr_err("BTRFS: failed to init dev_replace: %d\n", ret); |
2731 | goto fail_block_groups; | 2731 | goto fail_block_groups; |
2732 | } | 2732 | } |
2733 | 2733 | ||
2734 | btrfs_close_extra_devices(fs_info, fs_devices, 1); | 2734 | btrfs_close_extra_devices(fs_info, fs_devices, 1); |
2735 | 2735 | ||
2736 | ret = btrfs_sysfs_add_one(fs_info); | 2736 | ret = btrfs_sysfs_add_one(fs_info); |
2737 | if (ret) { | 2737 | if (ret) { |
2738 | pr_err("BTRFS: failed to init sysfs interface: %d\n", ret); | 2738 | pr_err("BTRFS: failed to init sysfs interface: %d\n", ret); |
2739 | goto fail_block_groups; | 2739 | goto fail_block_groups; |
2740 | } | 2740 | } |
2741 | 2741 | ||
2742 | ret = btrfs_init_space_info(fs_info); | 2742 | ret = btrfs_init_space_info(fs_info); |
2743 | if (ret) { | 2743 | if (ret) { |
2744 | printk(KERN_ERR "BTRFS: Failed to initial space info: %d\n", ret); | 2744 | printk(KERN_ERR "BTRFS: Failed to initial space info: %d\n", ret); |
2745 | goto fail_sysfs; | 2745 | goto fail_sysfs; |
2746 | } | 2746 | } |
2747 | 2747 | ||
2748 | ret = btrfs_read_block_groups(extent_root); | 2748 | ret = btrfs_read_block_groups(extent_root); |
2749 | if (ret) { | 2749 | if (ret) { |
2750 | printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret); | 2750 | printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret); |
2751 | goto fail_sysfs; | 2751 | goto fail_sysfs; |
2752 | } | 2752 | } |
2753 | fs_info->num_tolerated_disk_barrier_failures = | 2753 | fs_info->num_tolerated_disk_barrier_failures = |
2754 | btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); | 2754 | btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); |
2755 | if (fs_info->fs_devices->missing_devices > | 2755 | if (fs_info->fs_devices->missing_devices > |
2756 | fs_info->num_tolerated_disk_barrier_failures && | 2756 | fs_info->num_tolerated_disk_barrier_failures && |
2757 | !(sb->s_flags & MS_RDONLY)) { | 2757 | !(sb->s_flags & MS_RDONLY)) { |
2758 | printk(KERN_WARNING "BTRFS: " | 2758 | printk(KERN_WARNING "BTRFS: " |
2759 | "too many missing devices, writeable mount is not allowed\n"); | 2759 | "too many missing devices, writeable mount is not allowed\n"); |
2760 | goto fail_sysfs; | 2760 | goto fail_sysfs; |
2761 | } | 2761 | } |
2762 | 2762 | ||
2763 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, | 2763 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, |
2764 | "btrfs-cleaner"); | 2764 | "btrfs-cleaner"); |
2765 | if (IS_ERR(fs_info->cleaner_kthread)) | 2765 | if (IS_ERR(fs_info->cleaner_kthread)) |
2766 | goto fail_sysfs; | 2766 | goto fail_sysfs; |
2767 | 2767 | ||
2768 | fs_info->transaction_kthread = kthread_run(transaction_kthread, | 2768 | fs_info->transaction_kthread = kthread_run(transaction_kthread, |
2769 | tree_root, | 2769 | tree_root, |
2770 | "btrfs-transaction"); | 2770 | "btrfs-transaction"); |
2771 | if (IS_ERR(fs_info->transaction_kthread)) | 2771 | if (IS_ERR(fs_info->transaction_kthread)) |
2772 | goto fail_cleaner; | 2772 | goto fail_cleaner; |
2773 | 2773 | ||
2774 | if (!btrfs_test_opt(tree_root, SSD) && | 2774 | if (!btrfs_test_opt(tree_root, SSD) && |
2775 | !btrfs_test_opt(tree_root, NOSSD) && | 2775 | !btrfs_test_opt(tree_root, NOSSD) && |
2776 | !fs_info->fs_devices->rotating) { | 2776 | !fs_info->fs_devices->rotating) { |
2777 | printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD " | 2777 | printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD " |
2778 | "mode\n"); | 2778 | "mode\n"); |
2779 | btrfs_set_opt(fs_info->mount_opt, SSD); | 2779 | btrfs_set_opt(fs_info->mount_opt, SSD); |
2780 | } | 2780 | } |
2781 | 2781 | ||
2782 | /* Set the real inode map cache flag */ | 2782 | /* Set the real inode map cache flag */ |
2783 | if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE)) | 2783 | if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE)) |
2784 | btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE); | 2784 | btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE); |
2785 | 2785 | ||
2786 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 2786 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
2787 | if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) { | 2787 | if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) { |
2788 | ret = btrfsic_mount(tree_root, fs_devices, | 2788 | ret = btrfsic_mount(tree_root, fs_devices, |
2789 | btrfs_test_opt(tree_root, | 2789 | btrfs_test_opt(tree_root, |
2790 | CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? | 2790 | CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? |
2791 | 1 : 0, | 2791 | 1 : 0, |
2792 | fs_info->check_integrity_print_mask); | 2792 | fs_info->check_integrity_print_mask); |
2793 | if (ret) | 2793 | if (ret) |
2794 | printk(KERN_WARNING "BTRFS: failed to initialize" | 2794 | printk(KERN_WARNING "BTRFS: failed to initialize" |
2795 | " integrity check module %s\n", sb->s_id); | 2795 | " integrity check module %s\n", sb->s_id); |
2796 | } | 2796 | } |
2797 | #endif | 2797 | #endif |
2798 | ret = btrfs_read_qgroup_config(fs_info); | 2798 | ret = btrfs_read_qgroup_config(fs_info); |
2799 | if (ret) | 2799 | if (ret) |
2800 | goto fail_trans_kthread; | 2800 | goto fail_trans_kthread; |
2801 | 2801 | ||
2802 | /* do not make disk changes in broken FS */ | 2802 | /* do not make disk changes in broken FS */ |
2803 | if (btrfs_super_log_root(disk_super) != 0) { | 2803 | if (btrfs_super_log_root(disk_super) != 0) { |
2804 | u64 bytenr = btrfs_super_log_root(disk_super); | 2804 | u64 bytenr = btrfs_super_log_root(disk_super); |
2805 | 2805 | ||
2806 | if (fs_devices->rw_devices == 0) { | 2806 | if (fs_devices->rw_devices == 0) { |
2807 | printk(KERN_WARNING "BTRFS: log replay required " | 2807 | printk(KERN_WARNING "BTRFS: log replay required " |
2808 | "on RO media\n"); | 2808 | "on RO media\n"); |
2809 | err = -EIO; | 2809 | err = -EIO; |
2810 | goto fail_qgroup; | 2810 | goto fail_qgroup; |
2811 | } | 2811 | } |
2812 | blocksize = | 2812 | blocksize = |
2813 | btrfs_level_size(tree_root, | 2813 | btrfs_level_size(tree_root, |
2814 | btrfs_super_log_root_level(disk_super)); | 2814 | btrfs_super_log_root_level(disk_super)); |
2815 | 2815 | ||
2816 | log_tree_root = btrfs_alloc_root(fs_info); | 2816 | log_tree_root = btrfs_alloc_root(fs_info); |
2817 | if (!log_tree_root) { | 2817 | if (!log_tree_root) { |
2818 | err = -ENOMEM; | 2818 | err = -ENOMEM; |
2819 | goto fail_qgroup; | 2819 | goto fail_qgroup; |
2820 | } | 2820 | } |
2821 | 2821 | ||
2822 | __setup_root(nodesize, leafsize, sectorsize, stripesize, | 2822 | __setup_root(nodesize, leafsize, sectorsize, stripesize, |
2823 | log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); | 2823 | log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); |
2824 | 2824 | ||
2825 | log_tree_root->node = read_tree_block(tree_root, bytenr, | 2825 | log_tree_root->node = read_tree_block(tree_root, bytenr, |
2826 | blocksize, | 2826 | blocksize, |
2827 | generation + 1); | 2827 | generation + 1); |
2828 | if (!log_tree_root->node || | 2828 | if (!log_tree_root->node || |
2829 | !extent_buffer_uptodate(log_tree_root->node)) { | 2829 | !extent_buffer_uptodate(log_tree_root->node)) { |
2830 | printk(KERN_ERR "BTRFS: failed to read log tree\n"); | 2830 | printk(KERN_ERR "BTRFS: failed to read log tree\n"); |
2831 | free_extent_buffer(log_tree_root->node); | 2831 | free_extent_buffer(log_tree_root->node); |
2832 | kfree(log_tree_root); | 2832 | kfree(log_tree_root); |
2833 | goto fail_trans_kthread; | 2833 | goto fail_trans_kthread; |
2834 | } | 2834 | } |
2835 | /* returns with log_tree_root freed on success */ | 2835 | /* returns with log_tree_root freed on success */ |
2836 | ret = btrfs_recover_log_trees(log_tree_root); | 2836 | ret = btrfs_recover_log_trees(log_tree_root); |
2837 | if (ret) { | 2837 | if (ret) { |
2838 | btrfs_error(tree_root->fs_info, ret, | 2838 | btrfs_error(tree_root->fs_info, ret, |
2839 | "Failed to recover log tree"); | 2839 | "Failed to recover log tree"); |
2840 | free_extent_buffer(log_tree_root->node); | 2840 | free_extent_buffer(log_tree_root->node); |
2841 | kfree(log_tree_root); | 2841 | kfree(log_tree_root); |
2842 | goto fail_trans_kthread; | 2842 | goto fail_trans_kthread; |
2843 | } | 2843 | } |
2844 | 2844 | ||
2845 | if (sb->s_flags & MS_RDONLY) { | 2845 | if (sb->s_flags & MS_RDONLY) { |
2846 | ret = btrfs_commit_super(tree_root); | 2846 | ret = btrfs_commit_super(tree_root); |
2847 | if (ret) | 2847 | if (ret) |
2848 | goto fail_trans_kthread; | 2848 | goto fail_trans_kthread; |
2849 | } | 2849 | } |
2850 | } | 2850 | } |
2851 | 2851 | ||
2852 | ret = btrfs_find_orphan_roots(tree_root); | 2852 | ret = btrfs_find_orphan_roots(tree_root); |
2853 | if (ret) | 2853 | if (ret) |
2854 | goto fail_trans_kthread; | 2854 | goto fail_trans_kthread; |
2855 | 2855 | ||
2856 | if (!(sb->s_flags & MS_RDONLY)) { | 2856 | if (!(sb->s_flags & MS_RDONLY)) { |
2857 | ret = btrfs_cleanup_fs_roots(fs_info); | 2857 | ret = btrfs_cleanup_fs_roots(fs_info); |
2858 | if (ret) | 2858 | if (ret) |
2859 | goto fail_trans_kthread; | 2859 | goto fail_trans_kthread; |
2860 | 2860 | ||
2861 | ret = btrfs_recover_relocation(tree_root); | 2861 | ret = btrfs_recover_relocation(tree_root); |
2862 | if (ret < 0) { | 2862 | if (ret < 0) { |
2863 | printk(KERN_WARNING | 2863 | printk(KERN_WARNING |
2864 | "BTRFS: failed to recover relocation\n"); | 2864 | "BTRFS: failed to recover relocation\n"); |
2865 | err = -EINVAL; | 2865 | err = -EINVAL; |
2866 | goto fail_qgroup; | 2866 | goto fail_qgroup; |
2867 | } | 2867 | } |
2868 | } | 2868 | } |
2869 | 2869 | ||
2870 | location.objectid = BTRFS_FS_TREE_OBJECTID; | 2870 | location.objectid = BTRFS_FS_TREE_OBJECTID; |
2871 | location.type = BTRFS_ROOT_ITEM_KEY; | 2871 | location.type = BTRFS_ROOT_ITEM_KEY; |
2872 | location.offset = 0; | 2872 | location.offset = 0; |
2873 | 2873 | ||
2874 | fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); | 2874 | fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); |
2875 | if (IS_ERR(fs_info->fs_root)) { | 2875 | if (IS_ERR(fs_info->fs_root)) { |
2876 | err = PTR_ERR(fs_info->fs_root); | 2876 | err = PTR_ERR(fs_info->fs_root); |
2877 | goto fail_qgroup; | 2877 | goto fail_qgroup; |
2878 | } | 2878 | } |
2879 | 2879 | ||
2880 | if (sb->s_flags & MS_RDONLY) | 2880 | if (sb->s_flags & MS_RDONLY) |
2881 | return 0; | 2881 | return 0; |
2882 | 2882 | ||
2883 | down_read(&fs_info->cleanup_work_sem); | 2883 | down_read(&fs_info->cleanup_work_sem); |
2884 | if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || | 2884 | if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || |
2885 | (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { | 2885 | (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { |
2886 | up_read(&fs_info->cleanup_work_sem); | 2886 | up_read(&fs_info->cleanup_work_sem); |
2887 | close_ctree(tree_root); | 2887 | close_ctree(tree_root); |
2888 | return ret; | 2888 | return ret; |
2889 | } | 2889 | } |
2890 | up_read(&fs_info->cleanup_work_sem); | 2890 | up_read(&fs_info->cleanup_work_sem); |
2891 | 2891 | ||
2892 | ret = btrfs_resume_balance_async(fs_info); | 2892 | ret = btrfs_resume_balance_async(fs_info); |
2893 | if (ret) { | 2893 | if (ret) { |
2894 | printk(KERN_WARNING "BTRFS: failed to resume balance\n"); | 2894 | printk(KERN_WARNING "BTRFS: failed to resume balance\n"); |
2895 | close_ctree(tree_root); | 2895 | close_ctree(tree_root); |
2896 | return ret; | 2896 | return ret; |
2897 | } | 2897 | } |
2898 | 2898 | ||
2899 | ret = btrfs_resume_dev_replace_async(fs_info); | 2899 | ret = btrfs_resume_dev_replace_async(fs_info); |
2900 | if (ret) { | 2900 | if (ret) { |
2901 | pr_warn("BTRFS: failed to resume dev_replace\n"); | 2901 | pr_warn("BTRFS: failed to resume dev_replace\n"); |
2902 | close_ctree(tree_root); | 2902 | close_ctree(tree_root); |
2903 | return ret; | 2903 | return ret; |
2904 | } | 2904 | } |
2905 | 2905 | ||
2906 | btrfs_qgroup_rescan_resume(fs_info); | 2906 | btrfs_qgroup_rescan_resume(fs_info); |
2907 | 2907 | ||
2908 | if (create_uuid_tree) { | 2908 | if (create_uuid_tree) { |
2909 | pr_info("BTRFS: creating UUID tree\n"); | 2909 | pr_info("BTRFS: creating UUID tree\n"); |
2910 | ret = btrfs_create_uuid_tree(fs_info); | 2910 | ret = btrfs_create_uuid_tree(fs_info); |
2911 | if (ret) { | 2911 | if (ret) { |
2912 | pr_warn("BTRFS: failed to create the UUID tree %d\n", | 2912 | pr_warn("BTRFS: failed to create the UUID tree %d\n", |
2913 | ret); | 2913 | ret); |
2914 | close_ctree(tree_root); | 2914 | close_ctree(tree_root); |
2915 | return ret; | 2915 | return ret; |
2916 | } | 2916 | } |
2917 | } else if (check_uuid_tree || | 2917 | } else if (check_uuid_tree || |
2918 | btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) { | 2918 | btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) { |
2919 | pr_info("BTRFS: checking UUID tree\n"); | 2919 | pr_info("BTRFS: checking UUID tree\n"); |
2920 | ret = btrfs_check_uuid_tree(fs_info); | 2920 | ret = btrfs_check_uuid_tree(fs_info); |
2921 | if (ret) { | 2921 | if (ret) { |
2922 | pr_warn("BTRFS: failed to check the UUID tree %d\n", | 2922 | pr_warn("BTRFS: failed to check the UUID tree %d\n", |
2923 | ret); | 2923 | ret); |
2924 | close_ctree(tree_root); | 2924 | close_ctree(tree_root); |
2925 | return ret; | 2925 | return ret; |
2926 | } | 2926 | } |
2927 | } else { | 2927 | } else { |
2928 | fs_info->update_uuid_tree_gen = 1; | 2928 | fs_info->update_uuid_tree_gen = 1; |
2929 | } | 2929 | } |
2930 | 2930 | ||
2931 | return 0; | 2931 | return 0; |
2932 | 2932 | ||
2933 | fail_qgroup: | 2933 | fail_qgroup: |
2934 | btrfs_free_qgroup_config(fs_info); | 2934 | btrfs_free_qgroup_config(fs_info); |
2935 | fail_trans_kthread: | 2935 | fail_trans_kthread: |
2936 | kthread_stop(fs_info->transaction_kthread); | 2936 | kthread_stop(fs_info->transaction_kthread); |
2937 | btrfs_cleanup_transaction(fs_info->tree_root); | 2937 | btrfs_cleanup_transaction(fs_info->tree_root); |
2938 | del_fs_roots(fs_info); | 2938 | del_fs_roots(fs_info); |
2939 | fail_cleaner: | 2939 | fail_cleaner: |
2940 | kthread_stop(fs_info->cleaner_kthread); | 2940 | kthread_stop(fs_info->cleaner_kthread); |
2941 | 2941 | ||
2942 | /* | 2942 | /* |
2943 | * make sure we're done with the btree inode before we stop our | 2943 | * make sure we're done with the btree inode before we stop our |
2944 | * kthreads | 2944 | * kthreads |
2945 | */ | 2945 | */ |
2946 | filemap_write_and_wait(fs_info->btree_inode->i_mapping); | 2946 | filemap_write_and_wait(fs_info->btree_inode->i_mapping); |
2947 | 2947 | ||
2948 | fail_sysfs: | 2948 | fail_sysfs: |
2949 | btrfs_sysfs_remove_one(fs_info); | 2949 | btrfs_sysfs_remove_one(fs_info); |
2950 | 2950 | ||
2951 | fail_block_groups: | 2951 | fail_block_groups: |
2952 | btrfs_put_block_group_cache(fs_info); | 2952 | btrfs_put_block_group_cache(fs_info); |
2953 | btrfs_free_block_groups(fs_info); | 2953 | btrfs_free_block_groups(fs_info); |
2954 | 2954 | ||
2955 | fail_tree_roots: | 2955 | fail_tree_roots: |
2956 | free_root_pointers(fs_info, 1); | 2956 | free_root_pointers(fs_info, 1); |
2957 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); | 2957 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); |
2958 | 2958 | ||
2959 | fail_sb_buffer: | 2959 | fail_sb_buffer: |
2960 | btrfs_stop_all_workers(fs_info); | 2960 | btrfs_stop_all_workers(fs_info); |
2961 | fail_alloc: | 2961 | fail_alloc: |
2962 | fail_iput: | 2962 | fail_iput: |
2963 | btrfs_mapping_tree_free(&fs_info->mapping_tree); | 2963 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
2964 | 2964 | ||
2965 | iput(fs_info->btree_inode); | 2965 | iput(fs_info->btree_inode); |
2966 | fail_delalloc_bytes: | 2966 | fail_delalloc_bytes: |
2967 | percpu_counter_destroy(&fs_info->delalloc_bytes); | 2967 | percpu_counter_destroy(&fs_info->delalloc_bytes); |
2968 | fail_dirty_metadata_bytes: | 2968 | fail_dirty_metadata_bytes: |
2969 | percpu_counter_destroy(&fs_info->dirty_metadata_bytes); | 2969 | percpu_counter_destroy(&fs_info->dirty_metadata_bytes); |
2970 | fail_bdi: | 2970 | fail_bdi: |
2971 | bdi_destroy(&fs_info->bdi); | 2971 | bdi_destroy(&fs_info->bdi); |
2972 | fail_srcu: | 2972 | fail_srcu: |
2973 | cleanup_srcu_struct(&fs_info->subvol_srcu); | 2973 | cleanup_srcu_struct(&fs_info->subvol_srcu); |
2974 | fail: | 2974 | fail: |
2975 | btrfs_free_stripe_hash_table(fs_info); | 2975 | btrfs_free_stripe_hash_table(fs_info); |
2976 | btrfs_close_devices(fs_info->fs_devices); | 2976 | btrfs_close_devices(fs_info->fs_devices); |
2977 | return err; | 2977 | return err; |
2978 | 2978 | ||
2979 | recovery_tree_root: | 2979 | recovery_tree_root: |
2980 | if (!btrfs_test_opt(tree_root, RECOVERY)) | 2980 | if (!btrfs_test_opt(tree_root, RECOVERY)) |
2981 | goto fail_tree_roots; | 2981 | goto fail_tree_roots; |
2982 | 2982 | ||
2983 | free_root_pointers(fs_info, 0); | 2983 | free_root_pointers(fs_info, 0); |
2984 | 2984 | ||
2985 | /* don't use the log in recovery mode, it won't be valid */ | 2985 | /* don't use the log in recovery mode, it won't be valid */ |
2986 | btrfs_set_super_log_root(disk_super, 0); | 2986 | btrfs_set_super_log_root(disk_super, 0); |
2987 | 2987 | ||
2988 | /* we can't trust the free space cache either */ | 2988 | /* we can't trust the free space cache either */ |
2989 | btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); | 2989 | btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); |
2990 | 2990 | ||
2991 | ret = next_root_backup(fs_info, fs_info->super_copy, | 2991 | ret = next_root_backup(fs_info, fs_info->super_copy, |
2992 | &num_backups_tried, &backup_index); | 2992 | &num_backups_tried, &backup_index); |
2993 | if (ret == -1) | 2993 | if (ret == -1) |
2994 | goto fail_block_groups; | 2994 | goto fail_block_groups; |
2995 | goto retry_root_backup; | 2995 | goto retry_root_backup; |
2996 | } | 2996 | } |
2997 | 2997 | ||
2998 | static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | 2998 | static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) |
2999 | { | 2999 | { |
3000 | if (uptodate) { | 3000 | if (uptodate) { |
3001 | set_buffer_uptodate(bh); | 3001 | set_buffer_uptodate(bh); |
3002 | } else { | 3002 | } else { |
3003 | struct btrfs_device *device = (struct btrfs_device *) | 3003 | struct btrfs_device *device = (struct btrfs_device *) |
3004 | bh->b_private; | 3004 | bh->b_private; |
3005 | 3005 | ||
3006 | printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to " | 3006 | printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to " |
3007 | "I/O error on %s\n", | 3007 | "I/O error on %s\n", |
3008 | rcu_str_deref(device->name)); | 3008 | rcu_str_deref(device->name)); |
3009 | /* note, we dont' set_buffer_write_io_error because we have | 3009 | /* note, we dont' set_buffer_write_io_error because we have |
3010 | * our own ways of dealing with the IO errors | 3010 | * our own ways of dealing with the IO errors |
3011 | */ | 3011 | */ |
3012 | clear_buffer_uptodate(bh); | 3012 | clear_buffer_uptodate(bh); |
3013 | btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); | 3013 | btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); |
3014 | } | 3014 | } |
3015 | unlock_buffer(bh); | 3015 | unlock_buffer(bh); |
3016 | put_bh(bh); | 3016 | put_bh(bh); |
3017 | } | 3017 | } |
3018 | 3018 | ||
3019 | struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) | 3019 | struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) |
3020 | { | 3020 | { |
3021 | struct buffer_head *bh; | 3021 | struct buffer_head *bh; |
3022 | struct buffer_head *latest = NULL; | 3022 | struct buffer_head *latest = NULL; |
3023 | struct btrfs_super_block *super; | 3023 | struct btrfs_super_block *super; |
3024 | int i; | 3024 | int i; |
3025 | u64 transid = 0; | 3025 | u64 transid = 0; |
3026 | u64 bytenr; | 3026 | u64 bytenr; |
3027 | 3027 | ||
3028 | /* we would like to check all the supers, but that would make | 3028 | /* we would like to check all the supers, but that would make |
3029 | * a btrfs mount succeed after a mkfs from a different FS. | 3029 | * a btrfs mount succeed after a mkfs from a different FS. |
3030 | * So, we need to add a special mount option to scan for | 3030 | * So, we need to add a special mount option to scan for |
3031 | * later supers, using BTRFS_SUPER_MIRROR_MAX instead | 3031 | * later supers, using BTRFS_SUPER_MIRROR_MAX instead |
3032 | */ | 3032 | */ |
3033 | for (i = 0; i < 1; i++) { | 3033 | for (i = 0; i < 1; i++) { |
3034 | bytenr = btrfs_sb_offset(i); | 3034 | bytenr = btrfs_sb_offset(i); |
3035 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= | 3035 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= |
3036 | i_size_read(bdev->bd_inode)) | 3036 | i_size_read(bdev->bd_inode)) |
3037 | break; | 3037 | break; |
3038 | bh = __bread(bdev, bytenr / 4096, | 3038 | bh = __bread(bdev, bytenr / 4096, |
3039 | BTRFS_SUPER_INFO_SIZE); | 3039 | BTRFS_SUPER_INFO_SIZE); |
3040 | if (!bh) | 3040 | if (!bh) |
3041 | continue; | 3041 | continue; |
3042 | 3042 | ||
3043 | super = (struct btrfs_super_block *)bh->b_data; | 3043 | super = (struct btrfs_super_block *)bh->b_data; |
3044 | if (btrfs_super_bytenr(super) != bytenr || | 3044 | if (btrfs_super_bytenr(super) != bytenr || |
3045 | btrfs_super_magic(super) != BTRFS_MAGIC) { | 3045 | btrfs_super_magic(super) != BTRFS_MAGIC) { |
3046 | brelse(bh); | 3046 | brelse(bh); |
3047 | continue; | 3047 | continue; |
3048 | } | 3048 | } |
3049 | 3049 | ||
3050 | if (!latest || btrfs_super_generation(super) > transid) { | 3050 | if (!latest || btrfs_super_generation(super) > transid) { |
3051 | brelse(latest); | 3051 | brelse(latest); |
3052 | latest = bh; | 3052 | latest = bh; |
3053 | transid = btrfs_super_generation(super); | 3053 | transid = btrfs_super_generation(super); |
3054 | } else { | 3054 | } else { |
3055 | brelse(bh); | 3055 | brelse(bh); |
3056 | } | 3056 | } |
3057 | } | 3057 | } |
3058 | return latest; | 3058 | return latest; |
3059 | } | 3059 | } |
3060 | 3060 | ||
3061 | /* | 3061 | /* |
3062 | * this should be called twice, once with wait == 0 and | 3062 | * this should be called twice, once with wait == 0 and |
3063 | * once with wait == 1. When wait == 0 is done, all the buffer heads | 3063 | * once with wait == 1. When wait == 0 is done, all the buffer heads |
3064 | * we write are pinned. | 3064 | * we write are pinned. |
3065 | * | 3065 | * |
3066 | * They are released when wait == 1 is done. | 3066 | * They are released when wait == 1 is done. |
3067 | * max_mirrors must be the same for both runs, and it indicates how | 3067 | * max_mirrors must be the same for both runs, and it indicates how |
3068 | * many supers on this one device should be written. | 3068 | * many supers on this one device should be written. |
3069 | * | 3069 | * |
3070 | * max_mirrors == 0 means to write them all. | 3070 | * max_mirrors == 0 means to write them all. |
3071 | */ | 3071 | */ |
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;	/* count of mirrors that failed in this pass */
	u32 crc;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		/* stop once a mirror slot would lie past the end of the device */
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		if (wait) {
			/*
			 * wait == 1 pass: look up the buffer_head that the
			 * wait == 0 pass left pinned, and wait for its I/O.
			 */
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				errors++;
				continue;
			}
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		} else {
			btrfs_set_super_bytenr(sb, bytenr);

			/* checksum everything after the csum field itself */
			crc = ~(u32)0;
			crc = btrfs_csum_data((char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			/*
			 * one reference for us, and we leave it for the
			 * caller
			 */
			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				printk(KERN_ERR "BTRFS: couldn't get super "
				       "buffer head for bytenr %Lu\n", bytenr);
				errors++;
				continue;
			}

			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			/* one reference for submit_bh */
			get_bh(bh);

			set_buffer_uptodate(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
			bh->b_private = device;
		}

		/*
		 * we fua the first super. The others we allow
		 * to go down lazy.
		 */
		if (i == 0)
			ret = btrfsic_submit_bh(WRITE_FUA, bh);
		else
			ret = btrfsic_submit_bh(WRITE_SYNC, bh);
		if (ret)
			errors++;
	}
	/* success as long as at least one mirror made it (errors < i) */
	return errors < i ? 0 : -1;
}
3155 | 3155 | ||
3156 | /* | 3156 | /* |
3157 | * endio for the write_dev_flush, this will wake anyone waiting | 3157 | * endio for the write_dev_flush, this will wake anyone waiting |
3158 | * for the barrier when it is done | 3158 | * for the barrier when it is done |
3159 | */ | 3159 | */ |
3160 | static void btrfs_end_empty_barrier(struct bio *bio, int err) | 3160 | static void btrfs_end_empty_barrier(struct bio *bio, int err) |
3161 | { | 3161 | { |
3162 | if (err) { | 3162 | if (err) { |
3163 | if (err == -EOPNOTSUPP) | 3163 | if (err == -EOPNOTSUPP) |
3164 | set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); | 3164 | set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); |
3165 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | 3165 | clear_bit(BIO_UPTODATE, &bio->bi_flags); |
3166 | } | 3166 | } |
3167 | if (bio->bi_private) | 3167 | if (bio->bi_private) |
3168 | complete(bio->bi_private); | 3168 | complete(bio->bi_private); |
3169 | bio_put(bio); | 3169 | bio_put(bio); |
3170 | } | 3170 | } |
3171 | 3171 | ||
/*
 * Trigger a flush for one of the devices.  With wait == 0 the flush bio
 * is only submitted; with wait == 1 it waits for the flush submitted by
 * the previous wait == 0 call on the same device to complete.
 *
 * Any device where the flush fails with EOPNOTSUPP is flagged as not
 * barrier-capable and is skipped by all later calls.
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
	struct bio *bio;
	int ret = 0;

	if (device->nobarriers)
		return 0;

	if (wait) {
		/* no bio means the submit pass failed or never ran */
		bio = device->flush_bio;
		if (!bio)
			return 0;

		/* btrfs_end_empty_barrier() completes flush_wait */
		wait_for_completion(&device->flush_wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
			/* device can't flush; remember that and carry on */
			printk_in_rcu("BTRFS: disabling barriers on dev %s\n",
				      rcu_str_deref(device->name));
			device->nobarriers = 1;
		} else if (!bio_flagged(bio, BIO_UPTODATE)) {
			ret = -EIO;
			btrfs_dev_stat_inc_and_print(device,
				BTRFS_DEV_STAT_FLUSH_ERRS);
		}

		/* drop the reference from the wait == 0 run */
		bio_put(bio);
		device->flush_bio = NULL;

		return ret;
	}

	/*
	 * one reference for us, and we leave it for the
	 * caller
	 */
	device->flush_bio = NULL;
	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = btrfs_end_empty_barrier;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	/* extra reference so the bio survives until the wait == 1 call */
	bio_get(bio);
	btrfsic_submit_bio(WRITE_FLUSH, bio);

	return 0;
}
3231 | 3231 | ||
3232 | /* | 3232 | /* |
3233 | * send an empty flush down to each device in parallel, | 3233 | * send an empty flush down to each device in parallel, |
3234 | * then wait for them | 3234 | * then wait for them |
3235 | */ | 3235 | */ |
3236 | static int barrier_all_devices(struct btrfs_fs_info *info) | 3236 | static int barrier_all_devices(struct btrfs_fs_info *info) |
3237 | { | 3237 | { |
3238 | struct list_head *head; | 3238 | struct list_head *head; |
3239 | struct btrfs_device *dev; | 3239 | struct btrfs_device *dev; |
3240 | int errors_send = 0; | 3240 | int errors_send = 0; |
3241 | int errors_wait = 0; | 3241 | int errors_wait = 0; |
3242 | int ret; | 3242 | int ret; |
3243 | 3243 | ||
3244 | /* send down all the barriers */ | 3244 | /* send down all the barriers */ |
3245 | head = &info->fs_devices->devices; | 3245 | head = &info->fs_devices->devices; |
3246 | list_for_each_entry_rcu(dev, head, dev_list) { | 3246 | list_for_each_entry_rcu(dev, head, dev_list) { |
3247 | if (dev->missing) | 3247 | if (dev->missing) |
3248 | continue; | 3248 | continue; |
3249 | if (!dev->bdev) { | 3249 | if (!dev->bdev) { |
3250 | errors_send++; | 3250 | errors_send++; |
3251 | continue; | 3251 | continue; |
3252 | } | 3252 | } |
3253 | if (!dev->in_fs_metadata || !dev->writeable) | 3253 | if (!dev->in_fs_metadata || !dev->writeable) |
3254 | continue; | 3254 | continue; |
3255 | 3255 | ||
3256 | ret = write_dev_flush(dev, 0); | 3256 | ret = write_dev_flush(dev, 0); |
3257 | if (ret) | 3257 | if (ret) |
3258 | errors_send++; | 3258 | errors_send++; |
3259 | } | 3259 | } |
3260 | 3260 | ||
3261 | /* wait for all the barriers */ | 3261 | /* wait for all the barriers */ |
3262 | list_for_each_entry_rcu(dev, head, dev_list) { | 3262 | list_for_each_entry_rcu(dev, head, dev_list) { |
3263 | if (dev->missing) | 3263 | if (dev->missing) |
3264 | continue; | 3264 | continue; |
3265 | if (!dev->bdev) { | 3265 | if (!dev->bdev) { |
3266 | errors_wait++; | 3266 | errors_wait++; |
3267 | continue; | 3267 | continue; |
3268 | } | 3268 | } |
3269 | if (!dev->in_fs_metadata || !dev->writeable) | 3269 | if (!dev->in_fs_metadata || !dev->writeable) |
3270 | continue; | 3270 | continue; |
3271 | 3271 | ||
3272 | ret = write_dev_flush(dev, 1); | 3272 | ret = write_dev_flush(dev, 1); |
3273 | if (ret) | 3273 | if (ret) |
3274 | errors_wait++; | 3274 | errors_wait++; |
3275 | } | 3275 | } |
3276 | if (errors_send > info->num_tolerated_disk_barrier_failures || | 3276 | if (errors_send > info->num_tolerated_disk_barrier_failures || |
3277 | errors_wait > info->num_tolerated_disk_barrier_failures) | 3277 | errors_wait > info->num_tolerated_disk_barrier_failures) |
3278 | return -EIO; | 3278 | return -EIO; |
3279 | return 0; | 3279 | return 0; |
3280 | } | 3280 | } |
3281 | 3281 | ||
/*
 * Compute how many device barrier (flush) failures can be tolerated
 * before a superblock write-out must be aborted.
 *
 * Starts from the total device count and lowers the answer to match the
 * weakest redundancy profile found among allocated, non-empty block
 * groups of each relevant type (data, system, metadata, mixed).
 */
int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info)
{
	struct btrfs_ioctl_space_info space;
	struct btrfs_space_info *sinfo;
	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
	int num_types = 4;
	int i;
	int c;
	/* optimistic start: every device but one may fail */
	int num_tolerated_disk_barrier_failures =
		(int)fs_info->fs_devices->num_devices;

	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		/* find the space_info matching this block group type */
		sinfo = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				sinfo = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!sinfo)
			continue;

		down_read(&sinfo->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&sinfo->block_groups[c])) {
				u64 flags;

				btrfs_get_block_group_info(
					&sinfo->block_groups[c], &space);
				/* ignore empty/unused block groups */
				if (space.total_bytes == 0 ||
				    space.used_bytes == 0)
					continue;
				flags = space.flags;
				/*
				 * return
				 * 0: if dup, single or RAID0 is configured for
				 *    any of metadata, system or data, else
				 * 1: if RAID5 is configured, or if RAID1 or
				 *    RAID10 is configured and only two mirrors
				 *    are used, else
				 * 2: if RAID6 is configured, else
				 * num_mirrors - 1: if RAID1 or RAID10 is
				 *                  configured and more than
				 *                  2 mirrors are used.
				 */
				if (num_tolerated_disk_barrier_failures > 0 &&
				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
					       BTRFS_BLOCK_GROUP_RAID0)) ||
				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
				      == 0)))
					num_tolerated_disk_barrier_failures = 0;
				else if (num_tolerated_disk_barrier_failures > 1) {
					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
					    BTRFS_BLOCK_GROUP_RAID5 |
					    BTRFS_BLOCK_GROUP_RAID10)) {
						num_tolerated_disk_barrier_failures = 1;
					} else if (flags &
						   BTRFS_BLOCK_GROUP_RAID6) {
						num_tolerated_disk_barrier_failures = 2;
					}
				}
			}
		}
		up_read(&sinfo->groups_sem);
	}

	return num_tolerated_disk_barrier_failures;
}
3359 | 3359 | ||
3360 | static int write_all_supers(struct btrfs_root *root, int max_mirrors) | 3360 | static int write_all_supers(struct btrfs_root *root, int max_mirrors) |
3361 | { | 3361 | { |
3362 | struct list_head *head; | 3362 | struct list_head *head; |
3363 | struct btrfs_device *dev; | 3363 | struct btrfs_device *dev; |
3364 | struct btrfs_super_block *sb; | 3364 | struct btrfs_super_block *sb; |
3365 | struct btrfs_dev_item *dev_item; | 3365 | struct btrfs_dev_item *dev_item; |
3366 | int ret; | 3366 | int ret; |
3367 | int do_barriers; | 3367 | int do_barriers; |
3368 | int max_errors; | 3368 | int max_errors; |
3369 | int total_errors = 0; | 3369 | int total_errors = 0; |
3370 | u64 flags; | 3370 | u64 flags; |
3371 | 3371 | ||
3372 | do_barriers = !btrfs_test_opt(root, NOBARRIER); | 3372 | do_barriers = !btrfs_test_opt(root, NOBARRIER); |
3373 | backup_super_roots(root->fs_info); | 3373 | backup_super_roots(root->fs_info); |
3374 | 3374 | ||
3375 | sb = root->fs_info->super_for_commit; | 3375 | sb = root->fs_info->super_for_commit; |
3376 | dev_item = &sb->dev_item; | 3376 | dev_item = &sb->dev_item; |
3377 | 3377 | ||
3378 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 3378 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); |
3379 | head = &root->fs_info->fs_devices->devices; | 3379 | head = &root->fs_info->fs_devices->devices; |
3380 | max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1; | 3380 | max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1; |
3381 | 3381 | ||
3382 | if (do_barriers) { | 3382 | if (do_barriers) { |
3383 | ret = barrier_all_devices(root->fs_info); | 3383 | ret = barrier_all_devices(root->fs_info); |
3384 | if (ret) { | 3384 | if (ret) { |
3385 | mutex_unlock( | 3385 | mutex_unlock( |
3386 | &root->fs_info->fs_devices->device_list_mutex); | 3386 | &root->fs_info->fs_devices->device_list_mutex); |
3387 | btrfs_error(root->fs_info, ret, | 3387 | btrfs_error(root->fs_info, ret, |
3388 | "errors while submitting device barriers."); | 3388 | "errors while submitting device barriers."); |
3389 | return ret; | 3389 | return ret; |
3390 | } | 3390 | } |
3391 | } | 3391 | } |
3392 | 3392 | ||
3393 | list_for_each_entry_rcu(dev, head, dev_list) { | 3393 | list_for_each_entry_rcu(dev, head, dev_list) { |
3394 | if (!dev->bdev) { | 3394 | if (!dev->bdev) { |
3395 | total_errors++; | 3395 | total_errors++; |
3396 | continue; | 3396 | continue; |
3397 | } | 3397 | } |
3398 | if (!dev->in_fs_metadata || !dev->writeable) | 3398 | if (!dev->in_fs_metadata || !dev->writeable) |
3399 | continue; | 3399 | continue; |
3400 | 3400 | ||
3401 | btrfs_set_stack_device_generation(dev_item, 0); | 3401 | btrfs_set_stack_device_generation(dev_item, 0); |
3402 | btrfs_set_stack_device_type(dev_item, dev->type); | 3402 | btrfs_set_stack_device_type(dev_item, dev->type); |
3403 | btrfs_set_stack_device_id(dev_item, dev->devid); | 3403 | btrfs_set_stack_device_id(dev_item, dev->devid); |
3404 | btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes); | 3404 | btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes); |
3405 | btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used); | 3405 | btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used); |
3406 | btrfs_set_stack_device_io_align(dev_item, dev->io_align); | 3406 | btrfs_set_stack_device_io_align(dev_item, dev->io_align); |
3407 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); | 3407 | btrfs_set_stack_device_io_width(dev_item, dev->io_width); |
3408 | btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); | 3408 | btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); |
3409 | memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); | 3409 | memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); |
3410 | memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE); | 3410 | memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE); |
3411 | 3411 | ||
3412 | flags = btrfs_super_flags(sb); | 3412 | flags = btrfs_super_flags(sb); |
3413 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); | 3413 | btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); |
3414 | 3414 | ||
3415 | ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors); | 3415 | ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors); |
3416 | if (ret) | 3416 | if (ret) |
3417 | total_errors++; | 3417 | total_errors++; |
3418 | } | 3418 | } |
3419 | if (total_errors > max_errors) { | 3419 | if (total_errors > max_errors) { |
3420 | btrfs_err(root->fs_info, "%d errors while writing supers", | 3420 | btrfs_err(root->fs_info, "%d errors while writing supers", |
3421 | total_errors); | 3421 | total_errors); |
3422 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 3422 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); |
3423 | 3423 | ||
3424 | /* FUA is masked off if unsupported and can't be the reason */ | 3424 | /* FUA is masked off if unsupported and can't be the reason */ |
3425 | btrfs_error(root->fs_info, -EIO, | 3425 | btrfs_error(root->fs_info, -EIO, |
3426 | "%d errors while writing supers", total_errors); | 3426 | "%d errors while writing supers", total_errors); |
3427 | return -EIO; | 3427 | return -EIO; |
3428 | } | 3428 | } |
3429 | 3429 | ||
3430 | total_errors = 0; | 3430 | total_errors = 0; |
3431 | list_for_each_entry_rcu(dev, head, dev_list) { | 3431 | list_for_each_entry_rcu(dev, head, dev_list) { |
3432 | if (!dev->bdev) | 3432 | if (!dev->bdev) |
3433 | continue; | 3433 | continue; |
3434 | if (!dev->in_fs_metadata || !dev->writeable) | 3434 | if (!dev->in_fs_metadata || !dev->writeable) |
3435 | continue; | 3435 | continue; |
3436 | 3436 | ||
3437 | ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors); | 3437 | ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors); |
3438 | if (ret) | 3438 | if (ret) |
3439 | total_errors++; | 3439 | total_errors++; |
3440 | } | 3440 | } |
3441 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | 3441 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); |
3442 | if (total_errors > max_errors) { | 3442 | if (total_errors > max_errors) { |
3443 | btrfs_error(root->fs_info, -EIO, | 3443 | btrfs_error(root->fs_info, -EIO, |
3444 | "%d errors while writing supers", total_errors); | 3444 | "%d errors while writing supers", total_errors); |
3445 | return -EIO; | 3445 | return -EIO; |
3446 | } | 3446 | } |
3447 | return 0; | 3447 | return 0; |
3448 | } | 3448 | } |
3449 | 3449 | ||
/*
 * Write the superblock out to all devices.  The transaction handle is
 * currently unused; all the work happens in write_all_supers().
 */
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	return write_all_supers(root, max_mirrors);
}
3455 | 3455 | ||
/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root)
{
	/* unhash the root so no new lookups can find it */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	/*
	 * for a dead root, wait for SRCU readers that may still hold a
	 * reference obtained before the radix-tree delete above
	 */
	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	/* on an aborted fs the log tree was never committed; free it now */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_free_log(NULL, root);

	__btrfs_remove_free_space_cache(root->free_ino_pinned);
	__btrfs_remove_free_space_cache(root->free_ino_ctl);
	free_fs_root(root);
}
3475 | 3475 | ||
/*
 * Release everything attached to an fs root and drop the final
 * reference on the root itself.  Callers must ensure the root can no
 * longer be looked up (see btrfs_drop_and_free_fs_root).
 */
static void free_fs_root(struct btrfs_root *root)
{
	iput(root->cache_inode);
	/* all inodes belonging to this root should be gone by now */
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	btrfs_free_block_rsv(root, root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	btrfs_put_fs_root(root);
}
3491 | 3491 | ||
/* Exported wrapper around free_fs_root(). */
void btrfs_free_fs_root(struct btrfs_root *root)
{
	free_fs_root(root);
}
3496 | 3496 | ||
3497 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) | 3497 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) |
3498 | { | 3498 | { |
3499 | u64 root_objectid = 0; | 3499 | u64 root_objectid = 0; |
3500 | struct btrfs_root *gang[8]; | 3500 | struct btrfs_root *gang[8]; |
3501 | int i; | 3501 | int i; |
3502 | int ret; | 3502 | int ret; |
3503 | 3503 | ||
3504 | while (1) { | 3504 | while (1) { |
3505 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, | 3505 | ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, |
3506 | (void **)gang, root_objectid, | 3506 | (void **)gang, root_objectid, |
3507 | ARRAY_SIZE(gang)); | 3507 | ARRAY_SIZE(gang)); |
3508 | if (!ret) | 3508 | if (!ret) |
3509 | break; | 3509 | break; |
3510 | 3510 | ||
3511 | root_objectid = gang[ret - 1]->root_key.objectid + 1; | 3511 | root_objectid = gang[ret - 1]->root_key.objectid + 1; |
3512 | for (i = 0; i < ret; i++) { | 3512 | for (i = 0; i < ret; i++) { |
3513 | int err; | 3513 | int err; |
3514 | 3514 | ||
3515 | root_objectid = gang[i]->root_key.objectid; | 3515 | root_objectid = gang[i]->root_key.objectid; |
3516 | err = btrfs_orphan_cleanup(gang[i]); | 3516 | err = btrfs_orphan_cleanup(gang[i]); |
3517 | if (err) | 3517 | if (err) |
3518 | return err; | 3518 | return err; |
3519 | } | 3519 | } |
3520 | root_objectid++; | 3520 | root_objectid++; |
3521 | } | 3521 | } |
3522 | return 0; | 3522 | return 0; |
3523 | } | 3523 | } |
3524 | 3524 | ||
/*
 * Flush the filesystem one last time before unmount: run pending
 * delayed iputs, wait for background cleanup work to drain, then join
 * and commit the current transaction.
 */
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* cleaner_mutex keeps the cleaner thread out while we run iputs */
	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	wake_up_process(root->fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans, root);
}
3543 | 3543 | ||
/*
 * Tear down a mounted filesystem.  The steps here are strictly ordered:
 * stop background activity first, commit outstanding state, stop the
 * kthreads, then free the in-memory structures.  Always returns 0.
 */
int close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/* signal everyone that the fs is going away */
	fs_info->closing = 1;
	smp_mb();

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complains from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	/* commit everything unless the mount is read-only */
	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
		if (ret)
			btrfs_err(root->fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_error_commit_super(root);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	/* phase 2: transactions are done, begin freeing structures */
	fs_info->closing = 2;
	smp_mb();

	btrfs_free_qgroup_config(root->fs_info);

	/* a nonzero count here means delalloc accounting leaked */
	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(root->fs_info, "at unmount delalloc count %lld",
		       percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_sysfs_remove_one(fs_info);

	del_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	btrfs_free_block_groups(fs_info);

	/*
	 * we must make sure there is not any read request to
	 * submit after we stopping all workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);

	btrfs_free_block_rsv(root, root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;

	return 0;
}
3632 | 3632 | ||
3633 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, | 3633 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, |
3634 | int atomic) | 3634 | int atomic) |
3635 | { | 3635 | { |
3636 | int ret; | 3636 | int ret; |
3637 | struct inode *btree_inode = buf->pages[0]->mapping->host; | 3637 | struct inode *btree_inode = buf->pages[0]->mapping->host; |
3638 | 3638 | ||
3639 | ret = extent_buffer_uptodate(buf); | 3639 | ret = extent_buffer_uptodate(buf); |
3640 | if (!ret) | 3640 | if (!ret) |
3641 | return ret; | 3641 | return ret; |
3642 | 3642 | ||
3643 | ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, | 3643 | ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, |
3644 | parent_transid, atomic); | 3644 | parent_transid, atomic); |
3645 | if (ret == -EAGAIN) | 3645 | if (ret == -EAGAIN) |
3646 | return ret; | 3646 | return ret; |
3647 | return !ret; | 3647 | return !ret; |
3648 | } | 3648 | } |
3649 | 3649 | ||
/* Mark an extent buffer as containing valid, up-to-date data. */
int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	return set_extent_buffer_uptodate(buf);
}
3654 | 3654 | ||
/*
 * Mark a locked metadata extent buffer dirty and account its bytes in
 * the per-fs dirty metadata counter (only on the clean -> dirty edge).
 * Warns if the buffer's generation doesn't match the running transaction.
 */
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	/* the caller must hold the tree lock on this buffer */
	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
			buf->start, transid, root->fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	/* only count the bytes once per clean -> dirty transition */
	if (!was_dirty)
		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
				     buf->len,
				     root->fs_info->dirty_metadata_batch);
}
3682 | 3682 | ||
3683 | static void __btrfs_btree_balance_dirty(struct btrfs_root *root, | 3683 | static void __btrfs_btree_balance_dirty(struct btrfs_root *root, |
3684 | int flush_delayed) | 3684 | int flush_delayed) |
3685 | { | 3685 | { |
3686 | /* | 3686 | /* |
3687 | * looks as though older kernels can get into trouble with | 3687 | * looks as though older kernels can get into trouble with |
3688 | * this code, they end up stuck in balance_dirty_pages forever | 3688 | * this code, they end up stuck in balance_dirty_pages forever |
3689 | */ | 3689 | */ |
3690 | int ret; | 3690 | int ret; |
3691 | 3691 | ||
3692 | if (current->flags & PF_MEMALLOC) | 3692 | if (current->flags & PF_MEMALLOC) |
3693 | return; | 3693 | return; |
3694 | 3694 | ||
3695 | if (flush_delayed) | 3695 | if (flush_delayed) |
3696 | btrfs_balance_delayed_items(root); | 3696 | btrfs_balance_delayed_items(root); |
3697 | 3697 | ||
3698 | ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes, | 3698 | ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes, |
3699 | BTRFS_DIRTY_METADATA_THRESH); | 3699 | BTRFS_DIRTY_METADATA_THRESH); |
3700 | if (ret > 0) { | 3700 | if (ret > 0) { |
3701 | balance_dirty_pages_ratelimited( | 3701 | balance_dirty_pages_ratelimited( |
3702 | root->fs_info->btree_inode->i_mapping); | 3702 | root->fs_info->btree_inode->i_mapping); |
3703 | } | 3703 | } |
3704 | return; | 3704 | return; |
3705 | } | 3705 | } |
3706 | 3706 | ||
/* Throttle btree metadata dirtying, flushing delayed items first. */
void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 1);
}
3711 | 3711 | ||
/* Same as btrfs_btree_balance_dirty() but skips the delayed-item flush. */
void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 0);
}
3716 | 3716 | ||
/*
 * Read (or re-validate) an extent buffer's pages, checking that its
 * generation matches @parent_transid.  The owning root is recovered from
 * the buffer's first page's mapping.  Returns the result of
 * btree_read_extent_buffer_pages().
 */
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
3722 | 3722 | ||
/*
 * Validate the on-disk superblock at mount time.
 *
 * Currently a stub that always reports success; @read_only is accepted but
 * unused so callers don't change when real checks are added.
 */
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only)
{
	/*
	 * Placeholder for checks
	 */
	return 0;
}
3731 | 3731 | ||
/*
 * Emergency teardown used when the filesystem hit a fatal error: run the
 * pending delayed iputs under the cleaner mutex, wait out any in-flight
 * cleanup work (the down/up write pair drains readers of
 * cleanup_work_sem), then abort all open transactions.
 */
static void btrfs_error_commit_super(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	/* Barrier: anyone holding cleanup_work_sem for read has finished. */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(root);
}
3744 | 3744 | ||
/*
 * Tear down the transaction's ordered-operations list on error.
 *
 * Splices t->ordered_operations onto a private list, then invalidates each
 * inode's root.  ordered_root_lock is dropped around
 * btrfs_invalidate_inodes() (which may sleep) and re-taken afterwards;
 * entries are removed from the splice list before the lock is released so
 * the walk stays safe.
 */
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
					     struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);

	list_splice_init(&t->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		/* Detach before dropping the lock so nobody else sees it. */
		list_del_init(&btrfs_inode->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->fs_info->ordered_root_lock);
	}

	spin_unlock(&root->fs_info->ordered_root_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
3772 | 3772 | ||
3773 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) | 3773 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
3774 | { | 3774 | { |
3775 | struct btrfs_ordered_extent *ordered; | 3775 | struct btrfs_ordered_extent *ordered; |
3776 | 3776 | ||
3777 | spin_lock(&root->ordered_extent_lock); | 3777 | spin_lock(&root->ordered_extent_lock); |
3778 | /* | 3778 | /* |
3779 | * This will just short circuit the ordered completion stuff which will | 3779 | * This will just short circuit the ordered completion stuff which will |
3780 | * make sure the ordered extent gets properly cleaned up. | 3780 | * make sure the ordered extent gets properly cleaned up. |
3781 | */ | 3781 | */ |
3782 | list_for_each_entry(ordered, &root->ordered_extents, | 3782 | list_for_each_entry(ordered, &root->ordered_extents, |
3783 | root_extent_list) | 3783 | root_extent_list) |
3784 | set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); | 3784 | set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); |
3785 | spin_unlock(&root->ordered_extent_lock); | 3785 | spin_unlock(&root->ordered_extent_lock); |
3786 | } | 3786 | } |
3787 | 3787 | ||
/*
 * Abort the ordered extents of every root on fs_info->ordered_roots.
 *
 * The roots list is spliced off and each root is moved back to the tail of
 * ordered_roots before processing, so the set is walked exactly once even
 * if new entries appear.  cond_resched_lock() keeps latency bounded while
 * holding ordered_root_lock.
 */
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		/* Put the root back on the live list before touching it. */
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);

		btrfs_destroy_ordered_extents(root);

		cond_resched_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}
3809 | 3809 | ||
/*
 * Drop every delayed ref of @trans during error cleanup.
 *
 * Walks the href rbtree head by head.  If a head's mutex is contended, a
 * reference is taken, delayed_refs->lock is dropped, and the lock/unlock
 * pair waits for the current holder before retrying the walk.  For each
 * head: all member refs are erased and put, bookkeeping counters are
 * adjusted, and bytes that a must_insert_reserved head had reserved are
 * re-pinned via btrfs_pin_extent() so the space accounting stays sane.
 *
 * Returns 0 (ret is never set to anything else here).
 */
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(root->fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (!mutex_trylock(&head->mutex)) {
			/*
			 * Someone holds the head; take a ref, wait for them
			 * to finish, then restart the tree walk.
			 */
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			spin_lock(&delayed_refs->lock);
			continue;
		}
		spin_lock(&head->lock);
		/* Drop every ref hanging off this head. */
		while ((node = rb_first(&head->ref_root)) != NULL) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			ref->in_tree = 0;
			rb_erase(&ref->rb_node, &head->ref_root);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		delayed_refs->num_heads--;
		if (head->processing == 0)
			delayed_refs->num_heads_ready--;
		atomic_dec(&delayed_refs->num_entries);
		head->node.in_tree = 0;
		rb_erase(&head->href_node, &delayed_refs->href_root);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		/* Re-pin space the head had reserved but never inserted. */
		if (pin_bytes)
			btrfs_pin_extent(root, head->node.bytenr,
					 head->node.num_bytes, 1);
		btrfs_put_delayed_ref(&head->node);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}
3877 | 3877 | ||
/*
 * Invalidate every inode on @root's delalloc list during error cleanup.
 *
 * The list is spliced off under delalloc_lock; each inode is unlinked and
 * its IN_DELALLOC_LIST bit cleared before the lock is dropped around the
 * (sleeping) btrfs_invalidate_inodes() call.
 */
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &btrfs_inode->runtime_flags);
		spin_unlock(&root->delalloc_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->delalloc_lock);
	}

	spin_unlock(&root->delalloc_lock);
}
3904 | 3904 | ||
/*
 * Run btrfs_destroy_delalloc_inodes() for every root that has delalloc
 * inodes.  Each root is grabbed (refcounted) before delalloc_root_lock is
 * dropped and put again afterwards; BUG_ON fires if a listed root can no
 * longer be grabbed, since that would mean it was freed while queued.
 */
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		list_del_init(&root->delalloc_root);
		/* Hold a ref across the unlocked section below. */
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}
3929 | 3929 | ||
/*
 * Discard btree buffers covered by @mark in @dirty_pages during error
 * cleanup: for every marked range, clear the mark, then walk the range in
 * leafsize steps, waiting out writeback on each tree block found, clearing
 * its dirty bit and dropping it as stale.
 *
 * Returns the last find_first_extent_bit() result (>0 once the tree is
 * exhausted).
 */
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			eb = btrfs_find_tree_block(root, start,
						   root->leafsize);
			start += root->leafsize;
			/* No cached buffer at this offset; skip ahead. */
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}
3963 | 3963 | ||
/*
 * Release all pinned extent ranges during error cleanup.
 *
 * Clears EXTENT_DIRTY ranges from @pinned_extents and unpins them via the
 * error path.  Because pinned_extents points at one of the two
 * fs_info->freed_extents trees, after the first pass completes the loop
 * flag flips us to the *other* tree so both are drained exactly once.
 *
 * Always returns 0.
 */
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}

	if (loop) {
		/* Switch to the other freed_extents tree for pass two. */
		if (unpin == &root->fs_info->freed_extents[0])
			unpin = &root->fs_info->freed_extents[1];
		else
			unpin = &root->fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}
3997 | 3997 | ||
/*
 * Abort a single transaction: drop its ordered operations and delayed
 * refs, walk its state machine forward (waking waiters at each stage so
 * blocked tasks can bail out), destroy delayed inodes, discard its dirty
 * btree pages and pinned extents, and finally mark it COMPLETED.
 *
 * The ordering of the state transitions and wake_up calls mirrors the
 * normal commit path and must not be rearranged.
 */
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_root *root)
{
	btrfs_destroy_ordered_operations(cur_trans, root);

	btrfs_destroy_delayed_refs(cur_trans, root);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&root->fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);

	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(root,
				    root->fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	/*
	memset(cur_trans, 0, sizeof(*cur_trans));
	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
	*/
}
4027 | 4027 | ||
/*
 * Abort every transaction on fs_info->trans_list, then flush the
 * filesystem-wide error-cleanup paths (ordered extents, delayed inodes,
 * pinned extents, delalloc inodes).
 *
 * Transactions already committing (state >= COMMIT_START) are waited on
 * rather than torn down; the running transaction is forced to
 * COMMIT_DOING and drained of writers first.  trans_lock is released
 * around every sleeping call and re-taken before touching the list again.
 *
 * Always returns 0.
 */
static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *t;

	mutex_lock(&root->fs_info->transaction_kthread_mutex);

	spin_lock(&root->fs_info->trans_lock);
	while (!list_empty(&root->fs_info->trans_list)) {
		t = list_first_entry(&root->fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			/* Already committing: just wait for it to finish. */
			atomic_inc(&t->use_count);
			spin_unlock(&root->fs_info->trans_lock);
			btrfs_wait_for_commit(root, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&root->fs_info->trans_lock);
			continue;
		}
		if (t == root->fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&root->fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, root);

		spin_lock(&root->fs_info->trans_lock);
		if (t == root->fs_info->running_transaction)
			root->fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&root->fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(root);
		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(root->fs_info);
	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);
	btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(root->fs_info);
	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

	return 0;
}
4080 | 4080 | ||
/*
 * extent_io hooks used by the btree inode's mapping.
 * NOTE(review): could likely be const-qualified, but the extent_io_tree
 * ops pointer elsewhere may be non-const in this tree — verify first.
 */
static struct extent_io_ops btree_extent_io_ops = {
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};
4088 | 4088 |