Commit 6eef3125886df260ca0e8758d141308152226f6a

Authored by Arne Jansen
1 parent 632dd772fc

btrfs: remove unneeded includes from scrub.c

Signed-off-by: Arne Jansen <sensille@gmx.net>

Showing 1 changed file with 0 additions and 6 deletions

/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
 #include "ordered-data.h"

/*
 * This is only the first step towards a full-features scrub. It reads all
 * extent and super block and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - To enhance the performance, better read-ahead strategies for the
 *    extent-tree can be employed.
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - In case of a read error on files with nodatasum, map the file and read
 *    the extent to trigger a writeback of the good copy
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 *  - make the prefetch cancellable
 */

struct scrub_bio;
struct scrub_page;
struct scrub_dev;
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_checksum(struct btrfs_work *work);
static int scrub_checksum_data(struct scrub_dev *sdev,
			       struct scrub_page *spag, void *buffer);
static int scrub_checksum_tree_block(struct scrub_dev *sdev,
				     struct scrub_page *spag, u64 logical,
				     void *buffer);
static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
static void scrub_fixup_end_io(struct bio *bio, int err);
static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
			  struct page *page);
static void scrub_fixup(struct scrub_bio *sbio, int ix);

#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
#define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */

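The two limits above multiply out to the figures the comments give, assuming the usual 4 KiB page size: 16 pages per bio is 64 KiB, and 16 such bios kept in flight per device is 1 MiB. A standalone sketch of that arithmetic (illustrative only, not part of the kernel source; the 4096-byte page size is an assumption):

#include <stdio.h>

/* assumption: 4 KiB pages, as on x86 */
#define ASSUMED_PAGE_SIZE	4096
#define SCRUB_PAGES_PER_BIO	16	/* mirrors the kernel constant */
#define SCRUB_BIOS_PER_DEV	16	/* mirrors the kernel constant */

int main(void)
{
	unsigned long per_bio = (unsigned long)ASSUMED_PAGE_SIZE * SCRUB_PAGES_PER_BIO;
	unsigned long per_dev = per_bio * SCRUB_BIOS_PER_DEV;

	/* prints 64 KiB per bio and 1024 KiB (1 MiB) in flight per device */
	printf("per bio:    %lu KiB\n", per_bio / 1024);
	printf("per device: %lu KiB\n", per_dev / 1024);
	return 0;
}
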
struct scrub_page {
	u64			flags;  /* extent flags */
	u64			generation;
	u64			mirror_num;
	int			have_csum;
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_dev	*sdev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	spag[SCRUB_PAGES_PER_BIO];
	u64			count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_dev {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
	struct btrfs_device	*dev;
	int			first_free;
	int			curr;
	atomic_t		in_flight;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

static void scrub_free_csums(struct scrub_dev *sdev)
{
	while (!list_empty(&sdev->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static void scrub_free_bio(struct bio *bio)
{
	int i;
	struct page *last_page = NULL;

	if (!bio)
		return;

	for (i = 0; i < bio->bi_vcnt; ++i) {
		if (bio->bi_io_vec[i].bv_page == last_page)
			continue;
		last_page = bio->bi_io_vec[i].bv_page;
		__free_page(last_page);
	}
	bio_put(bio);
}

static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
	int i;

	if (!sdev)
		return;

	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio = sdev->bios[i];

		if (!sbio)
			break;

		scrub_free_bio(sbio->bio);
		kfree(sbio);
	}

	scrub_free_csums(sdev);
	kfree(sdev);
}

static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
	struct scrub_dev *sdev;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;

	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
	if (!sdev)
		goto nomem;
	sdev->dev = dev;
	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sdev->bios[i] = sbio;

		sbio->index = i;
		sbio->sdev = sdev;
		sbio->count = 0;
		sbio->work.func = scrub_checksum;

		if (i != SCRUB_BIOS_PER_DEV-1)
			sdev->bios[i]->next_free = i + 1;
		else
			sdev->bios[i]->next_free = -1;
	}
	sdev->first_free = 0;
	sdev->curr = -1;
	atomic_set(&sdev->in_flight, 0);
	atomic_set(&sdev->cancel_req, 0);
	sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
	INIT_LIST_HEAD(&sdev->csum_list);

	spin_lock_init(&sdev->list_lock);
	spin_lock_init(&sdev->stat_lock);
	init_waitqueue_head(&sdev->list_wait);
	return sdev;

nomem:
	scrub_free_dev(sdev);
	return ERR_PTR(-ENOMEM);
}

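scrub_setup_dev threads the bios array into a simple index-based free list: first_free names the first available scrub_bio and each element's next_free names the following one, with -1 as the end marker. scrub_page() later pops an index from this list under list_lock, and scrub_checksum() pushes finished bios back and wakes waiters. A minimal user-space sketch of the same pop/push pattern (hypothetical names, locking and waiting omitted):

#define NSLOTS 16

struct slot {
	int next_free;		/* index of the next free slot, -1 terminates */
};

static struct slot slots[NSLOTS];
static int first_free;

static void init_free_list(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++)
		slots[i].next_free = (i == NSLOTS - 1) ? -1 : i + 1;
	first_free = 0;
}

/* pop the head of the free list, -1 if nothing is available */
static int acquire_slot(void)
{
	int idx = first_free;

	if (idx != -1) {
		first_free = slots[idx].next_free;
		slots[idx].next_free = -1;
	}
	return idx;
}

/* push a finished slot back onto the free list */
static void release_slot(int idx)
{
	slots[idx].next_free = first_free;
	first_free = idx;
}
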
/*
 * scrub_recheck_error gets called when either verification of the page
 * failed or the bio failed to read, e.g. with EIO. In the latter case,
 * recheck_error gets called for every page in the bio, even though only
 * one may be bad
 */
static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
{
	if (sbio->err) {
		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
				   (sbio->physical + ix * PAGE_SIZE) >> 9,
				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
			if (scrub_fixup_check(sbio, ix) == 0)
				return;
		}
	}

	scrub_fixup(sbio, ix);
}

static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
{
	int ret = 1;
	struct page *page;
	void *buffer;
	u64 flags = sbio->spag[ix].flags;

	page = sbio->bio->bi_io_vec[ix].bv_page;
	buffer = kmap_atomic(page, KM_USER0);
	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		ret = scrub_checksum_data(sbio->sdev,
					  sbio->spag + ix, buffer);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = scrub_checksum_tree_block(sbio->sdev,
						sbio->spag + ix,
						sbio->logical + ix * PAGE_SIZE,
						buffer);
	} else {
		WARN_ON(1);
	}
	kunmap_atomic(buffer, KM_USER0);

	return ret;
}

static void scrub_fixup_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static void scrub_fixup(struct scrub_bio *sbio, int ix)
{
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct btrfs_multi_bio *multi = NULL;
	u64 logical = sbio->logical + ix * PAGE_SIZE;
	u64 length;
	int i;
	int ret;
	DECLARE_COMPLETION_ONSTACK(complete);

	if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (sbio->spag[ix].have_csum == 0)) {
		/*
		 * nodatasum, don't try to fix anything
		 * FIXME: we can do better, open the inode and trigger a
		 * writeback
		 */
		goto uncorrectable;
	}

	length = PAGE_SIZE;
	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
			      &multi, 0);
	if (ret || !multi || length < PAGE_SIZE) {
		printk(KERN_ERR
		       "scrub_fixup: btrfs_map_block failed us for %llu\n",
		       (unsigned long long)logical);
		WARN_ON(1);
		return;
	}

	if (multi->num_stripes == 1)
		/* there aren't any replicas */
		goto uncorrectable;

	/*
	 * first find a good copy
	 */
	for (i = 0; i < multi->num_stripes; ++i) {
		if (i == sbio->spag[ix].mirror_num)
			continue;

		if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
				   multi->stripes[i].physical >> 9,
				   sbio->bio->bi_io_vec[ix].bv_page)) {
			/* I/O-error, this is not a good copy */
			continue;
		}

		if (scrub_fixup_check(sbio, ix) == 0)
			break;
	}
	if (i == multi->num_stripes)
		goto uncorrectable;

	if (!sdev->readonly) {
		/*
		 * bi_io_vec[ix].bv_page now contains good data, write it back
		 */
		if (scrub_fixup_io(WRITE, sdev->dev->bdev,
				   (sbio->physical + ix * PAGE_SIZE) >> 9,
				   sbio->bio->bi_io_vec[ix].bv_page)) {
			/* I/O-error, writeback failed, give up */
			goto uncorrectable;
		}
	}

	kfree(multi);
	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

	if (printk_ratelimit())
		printk(KERN_ERR "btrfs: fixed up at %llu\n",
		       (unsigned long long)logical);
	return;

uncorrectable:
	kfree(multi);
	spin_lock(&sdev->stat_lock);
	++sdev->stat.uncorrectable_errors;
	spin_unlock(&sdev->stat_lock);

	if (printk_ratelimit())
		printk(KERN_ERR "btrfs: unable to fixup at %llu\n",
		       (unsigned long long)logical);
}

static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	struct bio *bio = NULL;
	int ret;
	DECLARE_COMPLETION_ONSTACK(complete);

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = scrub_fixup_end_io;
	bio->bi_private = &complete;
	submit_bio(rw, bio);

	/* this will also unplug the queue */
	wait_for_completion(&complete);

	ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

static void scrub_checksum(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_dev *sdev = sbio->sdev;
	struct page *page;
	void *buffer;
	int i;
	u64 flags;
	u64 logical;
	int ret;

	if (sbio->err) {
		for (i = 0; i < sbio->count; ++i)
			scrub_recheck_error(sbio, i);

		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bio->bi_phys_segments = 0;
		sbio->bio->bi_idx = 0;

		for (i = 0; i < sbio->count; i++) {
			struct bio_vec *bi;
			bi = &sbio->bio->bi_io_vec[i];
			bi->bv_offset = 0;
			bi->bv_len = PAGE_SIZE;
		}

		spin_lock(&sdev->stat_lock);
		++sdev->stat.read_errors;
		spin_unlock(&sdev->stat_lock);
		goto out;
	}
	for (i = 0; i < sbio->count; ++i) {
		page = sbio->bio->bi_io_vec[i].bv_page;
		buffer = kmap_atomic(page, KM_USER0);
		flags = sbio->spag[i].flags;
		logical = sbio->logical + i * PAGE_SIZE;
		ret = 0;
		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
		} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
							logical, buffer);
		} else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
			BUG_ON(i);
			(void)scrub_checksum_super(sbio, buffer);
		} else {
			WARN_ON(1);
		}
		kunmap_atomic(buffer, KM_USER0);
		if (ret)
			scrub_recheck_error(sbio, i);
	}

out:
	scrub_free_bio(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sdev->list_lock);
	sbio->next_free = sdev->first_free;
	sdev->first_free = sbio->index;
	spin_unlock(&sdev->list_lock);
	atomic_dec(&sdev->in_flight);
	wake_up(&sdev->list_wait);
}

static int scrub_checksum_data(struct scrub_dev *sdev,
			       struct scrub_page *spag, void *buffer)
{
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sdev->dev->dev_root;

	if (!spag->have_csum)
		return 0;

	crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, spag->csum, sdev->csum_size))
		fail = 1;

	spin_lock(&sdev->stat_lock);
	++sdev->stat.data_extents_scrubbed;
	sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
	if (fail)
		++sdev->stat.csum_errors;
	spin_unlock(&sdev->stat_lock);

	return fail;
}
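
scrub_checksum_data only verifies pages for which a checksum was carried along (have_csum); it recomputes the checksum over the whole page with btrfs_csum_data()/btrfs_csum_final() and memcmp()s the first csum_size bytes against the stored value. The shape of that check in isolation (a sketch; the trivial rotate-and-xor "checksum" here is only a stand-in for the real routine, which in btrfs at this point is crc32c):

#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096	/* assumption: 4 KiB pages */
#define SKETCH_CSUM_SIZE 4	/* crc32c fills 4 of the 32 csum bytes */

/* stand-in for btrfs_csum_data()/btrfs_csum_final() */
static void toy_csum(const uint8_t *buf, size_t len, uint8_t out[SKETCH_CSUM_SIZE])
{
	uint32_t acc = ~0u;
	size_t i;

	for (i = 0; i < len; i++)
		acc = (acc << 1 | acc >> 31) ^ buf[i];
	memcpy(out, &acc, SKETCH_CSUM_SIZE);
}

/* returns 1 on mismatch, 0 if the page matches its stored checksum */
static int verify_page(const uint8_t *page, const uint8_t *stored_csum)
{
	uint8_t csum[SKETCH_CSUM_SIZE];

	toy_csum(page, SKETCH_PAGE_SIZE, csum);
	return memcmp(csum, stored_csum, SKETCH_CSUM_SIZE) != 0;
}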

static int scrub_checksum_tree_block(struct scrub_dev *sdev,
				     struct scrub_page *spag, u64 logical,
				     void *buffer)
{
	struct btrfs_header *h;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	h = (struct btrfs_header *)buffer;

	if (logical != le64_to_cpu(h->bytenr))
		++fail;

	if (spag->generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
			      PAGE_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, h->csum, sdev->csum_size))
		++crc_fail;

	spin_lock(&sdev->stat_lock);
	++sdev->stat.tree_extents_scrubbed;
	sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
	if (crc_fail)
		++sdev->stat.csum_errors;
	if (fail)
		++sdev->stat.verify_errors;
	spin_unlock(&sdev->stat_lock);

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
{
	struct btrfs_super_block *s;
	u64 logical;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;

	s = (struct btrfs_super_block *)buffer;
	logical = sbio->logical;

	if (logical != le64_to_cpu(s->bytenr))
		++fail;

	if (sbio->spag[0].generation != le64_to_cpu(s->generation))
		++fail;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
			      PAGE_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, s->csum, sbio->sdev->csum_size))
		++fail;

	if (fail) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sdev->stat_lock);
		++sdev->stat.super_errors;
		spin_unlock(&sdev->stat_lock);
	}

	return fail;
}

static int scrub_submit(struct scrub_dev *sdev)
{
	struct scrub_bio *sbio;
	struct bio *bio;
	int i;

	if (sdev->curr == -1)
		return 0;

	sbio = sdev->bios[sdev->curr];

	bio = bio_alloc(GFP_NOFS, sbio->count);
	if (!bio)
		goto nomem;

	bio->bi_private = sbio;
	bio->bi_end_io = scrub_bio_end_io;
	bio->bi_bdev = sdev->dev->bdev;
	bio->bi_sector = sbio->physical >> 9;

	for (i = 0; i < sbio->count; ++i) {
		struct page *page;
		int ret;

		page = alloc_page(GFP_NOFS);
		if (!page)
			goto nomem;

		ret = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (!ret) {
			__free_page(page);
			goto nomem;
		}
	}

	sbio->err = 0;
	sdev->curr = -1;
	atomic_inc(&sdev->in_flight);

	submit_bio(READ, bio);

	return 0;

nomem:
	scrub_free_bio(bio);

	return -ENOMEM;
}

static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
		      u64 physical, u64 flags, u64 gen, u64 mirror_num,
		      u8 *csum, int force)
{
	struct scrub_bio *sbio;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sdev->curr == -1) {
		spin_lock(&sdev->list_lock);
		sdev->curr = sdev->first_free;
		if (sdev->curr != -1) {
			sdev->first_free = sdev->bios[sdev->curr]->next_free;
			sdev->bios[sdev->curr]->next_free = -1;
			sdev->bios[sdev->curr]->count = 0;
			spin_unlock(&sdev->list_lock);
		} else {
			spin_unlock(&sdev->list_lock);
			wait_event(sdev->list_wait, sdev->first_free != -1);
		}
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->count == 0) {
		sbio->physical = physical;
		sbio->logical = logical;
	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
		int ret;

		ret = scrub_submit(sdev);
		if (ret)
			return ret;
		goto again;
	}
	sbio->spag[sbio->count].flags = flags;
	sbio->spag[sbio->count].generation = gen;
	sbio->spag[sbio->count].have_csum = 0;
	sbio->spag[sbio->count].mirror_num = mirror_num;
	if (csum) {
		sbio->spag[sbio->count].have_csum = 1;
		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
	}
	++sbio->count;
	if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
		int ret;

		ret = scrub_submit(sdev);
		if (ret)
			return ret;
	}

	return 0;
}
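
scrub_page() keeps appending pages to the current scrub_bio as long as both the physical and the logical address continue exactly where the previous page ended; any discontinuity, a full bio (SCRUB_PAGES_PER_BIO pages), or the force flag makes it call scrub_submit(). A condensed sketch of just that batching decision (hypothetical types; 4 KiB pages assumed, error handling omitted):

struct batch {
	unsigned long long logical;	/* logical address of the first page */
	unsigned long long physical;	/* physical address of the first page */
	unsigned int count;		/* pages queued so far */
};

/*
 * an empty batch accepts anything (it is then restarted at the new
 * addresses); otherwise both address spaces must continue seamlessly
 */
static int page_fits_batch(const struct batch *b,
			   unsigned long long logical,
			   unsigned long long physical)
{
	return b->count == 0 ||
	       (b->physical + b->count * 4096ULL == physical &&
		b->logical + b->count * 4096ULL == logical);
}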

static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;
	u32 sectorsize = sdev->dev->dev_root->sectorsize;

	while (!list_empty(&sdev->csum_list)) {
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sdev->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
			u64 physical, u64 flags, u64 gen, u64 mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];

	while (len) {
		u64 l = min_t(u64, len, PAGE_SIZE);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sdev, logical, l, csum);
			if (have_csum == 0)
				++sdev->stat.no_csum;
		}
		ret = scrub_page(sdev, logical, l, physical, flags, gen,
				 mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
	struct map_lookup *map, int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	int start_stripe;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	u64 mirror_num;

	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 0;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes;
	} else {
		increment = map->stripe_len;
		mirror_num = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * find all extents for each stripe and just read them to get
	 * them into the page cache
	 * FIXME: we can do better. build a more intelligent prefetching
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out_noplug;

		/*
		 * we might miss half an extent here, but that doesn't matter,
		 * as it's only the prefetch
		 */
		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out_noplug;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid >= logical + map->stripe_len)
				break;

			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		cond_resched();
	}

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up to be about 1MB
	 */
	start_stripe = 0;
	blk_start_plug(&plug);
again:
	logical = base + offset + start_stripe * increment;
	for (i = start_stripe; i < nstripes; ++i) {
		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sdev->csum_list, 1);
		if (ret)
			goto out;

		logical += increment;
		cond_resched();
	}
	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset + start_stripe * increment;
	physical = map->stripes[num].physical + start_stripe * map->stripe_len;
	ret = 0;
	for (i = start_stripe; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sdev->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sdev);
			wait_event(sdev->list_wait,
				   atomic_read(&sdev->in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
					   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
			scrub_free_csums(sdev);
			start_stripe = i;
			goto again;
		}

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sdev, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   flags, generation, mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sdev->stat_lock);
		sdev->stat.last_physical = physical;
		spin_unlock(&sdev->stat_lock);
	}
	/* push queued extents */
	scrub_submit(sdev);

out:
	blk_finish_plug(&plug);
out_noplug:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
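
The offset/increment setup at the top of scrub_stripe() decides which logical addresses this device is responsible for: for RAID0 the device holding stripe num starts at base + stripe_len * num and then advances by stripe_len * num_stripes per iteration, so each device walks only its own stripes of the chunk. A small worked example under assumed numbers (64 KiB stripes, two devices; the values are illustrative, not taken from the commit):

#include <stdio.h>

int main(void)
{
	unsigned long long base = 0;			/* chunk start (assumed) */
	unsigned long long stripe_len = 64 * 1024;	/* assumed 64 KiB stripes */
	int num_stripes = 2;				/* RAID0 over two devices */
	int num = 1;					/* we are the second device */
	unsigned long long offset = stripe_len * num;
	unsigned long long increment = stripe_len * num_stripes;
	unsigned long long logical = base + offset;
	int i;

	/* prints 64K, 192K, 320K, ...: every second stripe of the chunk */
	for (i = 0; i < 4; i++) {
		printf("stripe %d at logical %lluK\n", i, logical / 1024);
		logical += increment;
	}
	return 0;
}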
995 989
996 static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, 990 static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
997 u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length) 991 u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
998 { 992 {
999 struct btrfs_mapping_tree *map_tree = 993 struct btrfs_mapping_tree *map_tree =
1000 &sdev->dev->dev_root->fs_info->mapping_tree; 994 &sdev->dev->dev_root->fs_info->mapping_tree;
1001 struct map_lookup *map; 995 struct map_lookup *map;
1002 struct extent_map *em; 996 struct extent_map *em;
1003 int i; 997 int i;
1004 int ret = -EINVAL; 998 int ret = -EINVAL;
1005 999
1006 read_lock(&map_tree->map_tree.lock); 1000 read_lock(&map_tree->map_tree.lock);
1007 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); 1001 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
1008 read_unlock(&map_tree->map_tree.lock); 1002 read_unlock(&map_tree->map_tree.lock);
1009 1003
1010 if (!em) 1004 if (!em)
1011 return -EINVAL; 1005 return -EINVAL;
1012 1006
1013 map = (struct map_lookup *)em->bdev; 1007 map = (struct map_lookup *)em->bdev;
1014 if (em->start != chunk_offset) 1008 if (em->start != chunk_offset)
1015 goto out; 1009 goto out;
1016 1010
1017 if (em->len < length) 1011 if (em->len < length)
1018 goto out; 1012 goto out;
1019 1013
1020 for (i = 0; i < map->num_stripes; ++i) { 1014 for (i = 0; i < map->num_stripes; ++i) {
1021 if (map->stripes[i].dev == sdev->dev) { 1015 if (map->stripes[i].dev == sdev->dev) {
1022 ret = scrub_stripe(sdev, map, i, chunk_offset, length); 1016 ret = scrub_stripe(sdev, map, i, chunk_offset, length);
1023 if (ret) 1017 if (ret)
1024 goto out; 1018 goto out;
1025 } 1019 }
1026 } 1020 }
1027 out: 1021 out:
1028 free_extent_map(em); 1022 free_extent_map(em);
1029 1023
1030 return ret; 1024 return ret;
1031 } 1025 }
1032 1026
1033 static noinline_for_stack 1027 static noinline_for_stack
1034 int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) 1028 int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1035 { 1029 {
1036 struct btrfs_dev_extent *dev_extent = NULL; 1030 struct btrfs_dev_extent *dev_extent = NULL;
1037 struct btrfs_path *path; 1031 struct btrfs_path *path;
1038 struct btrfs_root *root = sdev->dev->dev_root; 1032 struct btrfs_root *root = sdev->dev->dev_root;
1039 struct btrfs_fs_info *fs_info = root->fs_info; 1033 struct btrfs_fs_info *fs_info = root->fs_info;
1040 u64 length; 1034 u64 length;
1041 u64 chunk_tree; 1035 u64 chunk_tree;
1042 u64 chunk_objectid; 1036 u64 chunk_objectid;
1043 u64 chunk_offset; 1037 u64 chunk_offset;
1044 int ret; 1038 int ret;
1045 int slot; 1039 int slot;
1046 struct extent_buffer *l; 1040 struct extent_buffer *l;
1047 struct btrfs_key key; 1041 struct btrfs_key key;
1048 struct btrfs_key found_key; 1042 struct btrfs_key found_key;
1049 struct btrfs_block_group_cache *cache; 1043 struct btrfs_block_group_cache *cache;
1050 1044
1051 path = btrfs_alloc_path(); 1045 path = btrfs_alloc_path();
1052 if (!path) 1046 if (!path)
1053 return -ENOMEM; 1047 return -ENOMEM;
1054 1048
1055 path->reada = 2; 1049 path->reada = 2;
1056 path->search_commit_root = 1; 1050 path->search_commit_root = 1;
1057 path->skip_locking = 1; 1051 path->skip_locking = 1;
1058 1052
1059 key.objectid = sdev->dev->devid; 1053 key.objectid = sdev->dev->devid;
1060 key.offset = 0ull; 1054 key.offset = 0ull;
1061 key.type = BTRFS_DEV_EXTENT_KEY; 1055 key.type = BTRFS_DEV_EXTENT_KEY;
1062 1056
1063 1057
        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        break;
                if (ret > 0) {
                        if (path->slots[0] >=
                            btrfs_header_nritems(path->nodes[0])) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret)
                                        break;
                        }
                }

                l = path->nodes[0];
                slot = path->slots[0];

                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.objectid != sdev->dev->devid)
                        break;

                if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
                        break;

                if (found_key.offset >= end)
                        break;

                if (found_key.offset < key.offset)
                        break;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);

                if (found_key.offset + length <= start) {
                        key.offset = found_key.offset + length;
                        btrfs_release_path(path);
                        continue;
                }

                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

                /*
                 * get a reference on the corresponding block group to prevent
                 * the chunk from going away while we scrub it
                 */
                cache = btrfs_lookup_block_group(fs_info, chunk_offset);
                if (!cache) {
                        ret = -ENOENT;
                        break;
                }
                ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
                                  chunk_offset, length);
                btrfs_put_block_group(cache);
                if (ret)
                        break;

                key.offset = found_key.offset + length;
                btrfs_release_path(path);
        }

        btrfs_free_path(path);

        /*
         * ret can still be 1 from search_slot or next_leaf,
         * that's not an error
         */
        return ret < 0 ? ret : 0;
}

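/*
 * scrub all superblock copies that fit on the device; the expected
 * generation is the last committed transaction
 */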
static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
{
        int i;
        u64 bytenr;
        u64 gen;
        int ret;
        struct btrfs_device *device = sdev->dev;
        struct btrfs_root *root = device->dev_root;

        gen = root->fs_info->last_trans_committed;

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
                        break;

                ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
                                 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
                if (ret)
                        return ret;
        }
        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

        return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        if (fs_info->scrub_workers_refcnt == 0) {
                btrfs_init_workers(&fs_info->scrub_workers, "scrub",
                        fs_info->thread_pool_size, &fs_info->generic_worker);
                fs_info->scrub_workers.idle_thresh = 4;
                btrfs_start_workers(&fs_info->scrub_workers, 1);
        }
        ++fs_info->scrub_workers_refcnt;
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

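/*
 * drop a reference on fs_info->scrub_workers and stop the workers when the
 * last reference is gone
 */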
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        if (--fs_info->scrub_workers_refcnt == 0)
                btrfs_stop_workers(&fs_info->scrub_workers);
        WARN_ON(fs_info->scrub_workers_refcnt < 0);
        mutex_unlock(&fs_info->scrub_lock);
}

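/*
 * main entry point for a scrub of one device: set up the per-device scrub
 * context, scrub the superblocks and then all chunks in [start, end), and
 * hand the accumulated statistics back to the caller via *progress
 */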
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
                    struct btrfs_scrub_progress *progress, int readonly)
{
        struct scrub_dev *sdev;
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
        struct btrfs_device *dev;

        if (btrfs_fs_closing(root->fs_info))
                return -EINVAL;

        /*
         * check some assumptions
         */
        if (root->sectorsize != PAGE_SIZE ||
            root->sectorsize != root->leafsize ||
            root->sectorsize != root->nodesize) {
                printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
                return -EINVAL;
        }

        ret = scrub_workers_get(root);
        if (ret)
                return ret;

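        /*
         * look up the device and refuse to scrub it if it is missing, not
         * part of the filesystem metadata, or already being scrubbed
         */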
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(root, devid, NULL, NULL);
        if (!dev || dev->missing) {
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return -ENODEV;
        }
        mutex_lock(&fs_info->scrub_lock);

        if (!dev->in_fs_metadata) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return -ENODEV;
        }

        if (dev->scrub_device) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return -EINPROGRESS;
        }
        sdev = scrub_setup_dev(dev);
        if (IS_ERR(sdev)) {
                mutex_unlock(&fs_info->scrub_lock);
                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                scrub_workers_put(root);
                return PTR_ERR(sdev);
        }
        sdev->readonly = readonly;
        dev->scrub_device = sdev;

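        /*
         * the actual work: scrub the superblocks first, then all chunks in
         * the requested range, and wait for outstanding I/O to complete
         */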
        atomic_inc(&fs_info->scrubs_running);
        mutex_unlock(&fs_info->scrub_lock);
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        down_read(&fs_info->scrub_super_lock);
        ret = scrub_supers(sdev);
        up_read(&fs_info->scrub_super_lock);

        if (!ret)
                ret = scrub_enumerate_chunks(sdev, start, end);

        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

        atomic_dec(&fs_info->scrubs_running);
        wake_up(&fs_info->scrub_pause_wait);

        if (progress)
                memcpy(progress, &sdev->stat, sizeof(*progress));

        mutex_lock(&fs_info->scrub_lock);
        dev->scrub_device = NULL;
        mutex_unlock(&fs_info->scrub_lock);

        scrub_free_dev(sdev);
        scrub_workers_put(root);

        return ret;
}

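/*
 * ask all running scrubs to pause and wait until every one of them has
 * reached its pause point
 */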
int btrfs_scrub_pause(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        atomic_inc(&fs_info->scrub_pause_req);
        while (atomic_read(&fs_info->scrubs_paused) !=
               atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           atomic_read(&fs_info->scrubs_paused) ==
                           atomic_read(&fs_info->scrubs_running));
                mutex_lock(&fs_info->scrub_lock);
        }
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

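/*
 * drop the pause request again and wake up paused scrubs
 */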
int btrfs_scrub_continue(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        atomic_dec(&fs_info->scrub_pause_req);
        wake_up(&fs_info->scrub_pause_wait);
        return 0;
}

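/*
 * exclude scrub from the superblocks: scrub_supers() takes scrub_super_lock
 * shared, these two take and release it exclusively
 */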
int btrfs_scrub_pause_super(struct btrfs_root *root)
{
        down_write(&root->fs_info->scrub_super_lock);
        return 0;
}

int btrfs_scrub_continue_super(struct btrfs_root *root)
{
        up_write(&root->fs_info->scrub_super_lock);
        return 0;
}

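/*
 * request cancellation of all running scrubs and wait until they have
 * finished
 */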
int btrfs_scrub_cancel(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->scrub_lock);
        if (!atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                return -ENOTCONN;
        }

        atomic_inc(&fs_info->scrub_cancel_req);
        while (atomic_read(&fs_info->scrubs_running)) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           atomic_read(&fs_info->scrubs_running) == 0);
                mutex_lock(&fs_info->scrub_lock);
        }
        atomic_dec(&fs_info->scrub_cancel_req);
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

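/*
 * cancel the scrub running on the given device and wait until it has
 * stopped; returns -ENOTCONN if no scrub is running on that device
 */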
int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct scrub_dev *sdev;

        mutex_lock(&fs_info->scrub_lock);
        sdev = dev->scrub_device;
        if (!sdev) {
                mutex_unlock(&fs_info->scrub_lock);
                return -ENOTCONN;
        }
        atomic_inc(&sdev->cancel_req);
        while (dev->scrub_device) {
                mutex_unlock(&fs_info->scrub_lock);
                wait_event(fs_info->scrub_pause_wait,
                           dev->scrub_device == NULL);
                mutex_lock(&fs_info->scrub_lock);
        }
        mutex_unlock(&fs_info->scrub_lock);

        return 0;
}

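/*
 * same as btrfs_scrub_cancel_dev(), but the device is looked up by devid
 */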
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_device *dev;
        int ret;

        /*
         * we have to hold the device_list_mutex here so the device
         * does not go away in cancel_dev. FIXME: find a better solution
         */
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(root, devid, NULL, NULL);
        if (!dev) {
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
                return -ENODEV;
        }
        ret = btrfs_scrub_cancel_dev(root, dev);
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);

        return ret;
}

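/*
 * copy the current scrub statistics of the given device to *progress;
 * returns -ENODEV if the device does not exist and -ENOTCONN if it is not
 * being scrubbed
 */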
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
                         struct btrfs_scrub_progress *progress)
{
        struct btrfs_device *dev;
        struct scrub_dev *sdev = NULL;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        dev = btrfs_find_device(root, devid, NULL, NULL);
        if (dev)
                sdev = dev->scrub_device;
        if (sdev)
                memcpy(progress, &sdev->stat, sizeof(*progress));
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

        return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
}