Commit d2fdb776e08d4231d7e86a879cc663a93913c202

Authored by Mikulas Patocka
Committed by Alasdair G Kergon
1 parent d8ddb1cfff

dm snapshot: use merge origin if snapshot invalid

If the snapshot we are merging became invalid (e.g. it ran out of
space), redirect all I/O directly to the origin device.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

Showing 1 changed file with 4 additions and 5 deletions
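
The changed hunk lies beyond the truncated portion of the listing below. As a
hedged sketch only (the label name and exact control flow are assumptions
drawn from the message above, not the verbatim patch), the described
behaviour amounts to replacing the -EIO failure for an invalid snapshot in
the merge target's map path with a remap to the origin:

        /* Sketch, not the verbatim hunk: once a merging snapshot has
         * become invalid (e.g. the COW device filled up), stop failing
         * I/O with -EIO and send the bio straight to the origin device.
         */
        if (!s->valid)
                goto redirect_to_origin;
        ...
redirect_to_origin:
        bio->bi_bdev = s->origin->bdev;

The intent, per the message, is that an invalid merging snapshot degrades to
a plain pass-through to the origin rather than erroring every bio.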

drivers/md/dm-snap.c
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        /* Whether or not owning mapped_device is suspended */
        int suspended;

        mempool_t *pending_pool;

        atomic_t pending_exceptions_count;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Queue of snapshot writes for ksnapd to flush */
        struct bio_list queued_bios;
        struct work_struct queued_bios_work;

        /* Chunks with outstanding reads */
        mempool_t *tracked_chunk_pool;
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *   => don't load the target; abort.
         * - Header does not have "valid" flag set
         *   => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *   => don't load the target; abort.
         *   (We can't use the intermediate origin state.)
         * - I/O error while merging
         *   => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle. We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed. While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate snapshot context private "
                            "structure";
                r = -ENOMEM;
                goto bad;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, 0, 0,
                          FMODE_READ | FMODE_WRITE, &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        s->suspended = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
        if (r) {
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                goto bad_pending_pool;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
1144 "tracking reads"; 1144 "tracking reads";
1145 goto bad_tracked_chunk_pool; 1145 goto bad_tracked_chunk_pool;
1146 } 1146 }
1147 1147
1148 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1148 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1149 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); 1149 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1150 1150
1151 spin_lock_init(&s->tracked_chunk_lock); 1151 spin_lock_init(&s->tracked_chunk_lock);
1152 1152
1153 bio_list_init(&s->queued_bios); 1153 bio_list_init(&s->queued_bios);
1154 INIT_WORK(&s->queued_bios_work, flush_queued_bios); 1154 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
1155 1155
1156 ti->private = s; 1156 ti->private = s;
1157 ti->num_flush_requests = num_flush_requests; 1157 ti->num_flush_requests = num_flush_requests;
1158 1158
1159 /* Add snapshot to the list of snapshots for this origin */ 1159 /* Add snapshot to the list of snapshots for this origin */
1160 /* Exceptions aren't triggered till snapshot_resume() is called */ 1160 /* Exceptions aren't triggered till snapshot_resume() is called */
1161 r = register_snapshot(s); 1161 r = register_snapshot(s);
1162 if (r == -ENOMEM) { 1162 if (r == -ENOMEM) {
1163 ti->error = "Snapshot origin struct allocation failed"; 1163 ti->error = "Snapshot origin struct allocation failed";
1164 goto bad_load_and_register; 1164 goto bad_load_and_register;
1165 } else if (r < 0) { 1165 } else if (r < 0) {
1166 /* invalid handover, register_snapshot has set ti->error */ 1166 /* invalid handover, register_snapshot has set ti->error */
1167 goto bad_load_and_register; 1167 goto bad_load_and_register;
1168 } 1168 }
1169 1169
1170 /* 1170 /*
1171 * Metadata must only be loaded into one table at a time, so skip this 1171 * Metadata must only be loaded into one table at a time, so skip this
1172 * if metadata will be handed over during resume. 1172 * if metadata will be handed over during resume.
1173 * Chunk size will be set during the handover - set it to zero to 1173 * Chunk size will be set during the handover - set it to zero to
1174 * ensure it's ignored. 1174 * ensure it's ignored.
1175 */ 1175 */
1176 if (r > 0) { 1176 if (r > 0) {
1177 s->store->chunk_size = 0; 1177 s->store->chunk_size = 0;
1178 return 0; 1178 return 0;
1179 } 1179 }
1180 1180
1181 r = s->store->type->read_metadata(s->store, dm_add_exception, 1181 r = s->store->type->read_metadata(s->store, dm_add_exception,
1182 (void *)s); 1182 (void *)s);
1183 if (r < 0) { 1183 if (r < 0) {
1184 ti->error = "Failed to read snapshot metadata"; 1184 ti->error = "Failed to read snapshot metadata";
1185 goto bad_read_metadata; 1185 goto bad_read_metadata;
1186 } else if (r > 0) { 1186 } else if (r > 0) {
1187 s->valid = 0; 1187 s->valid = 0;
1188 DMWARN("Snapshot is marked invalid."); 1188 DMWARN("Snapshot is marked invalid.");
1189 } 1189 }
1190 1190
1191 if (!s->store->chunk_size) { 1191 if (!s->store->chunk_size) {
1192 ti->error = "Chunk size not set"; 1192 ti->error = "Chunk size not set";
1193 goto bad_read_metadata; 1193 goto bad_read_metadata;
1194 } 1194 }
1195 ti->split_io = s->store->chunk_size; 1195 ti->split_io = s->store->chunk_size;
1196 1196
1197 return 0; 1197 return 0;
1198 1198
1199 bad_read_metadata: 1199 bad_read_metadata:
1200 unregister_snapshot(s); 1200 unregister_snapshot(s);
1201 1201
1202 bad_load_and_register: 1202 bad_load_and_register:
1203 mempool_destroy(s->tracked_chunk_pool); 1203 mempool_destroy(s->tracked_chunk_pool);
1204 1204
1205 bad_tracked_chunk_pool: 1205 bad_tracked_chunk_pool:
1206 mempool_destroy(s->pending_pool); 1206 mempool_destroy(s->pending_pool);
1207 1207
1208 bad_pending_pool: 1208 bad_pending_pool:
1209 dm_kcopyd_client_destroy(s->kcopyd_client); 1209 dm_kcopyd_client_destroy(s->kcopyd_client);
1210 1210
1211 bad_kcopyd: 1211 bad_kcopyd:
1212 dm_exception_table_exit(&s->pending, pending_cache); 1212 dm_exception_table_exit(&s->pending, pending_cache);
1213 dm_exception_table_exit(&s->complete, exception_cache); 1213 dm_exception_table_exit(&s->complete, exception_cache);
1214 1214
1215 bad_hash_tables: 1215 bad_hash_tables:
1216 dm_put_device(ti, s->origin); 1216 dm_put_device(ti, s->origin);
1217 1217
1218 bad_origin: 1218 bad_origin:
1219 dm_exception_store_destroy(s->store); 1219 dm_exception_store_destroy(s->store);
1220 1220
1221 bad_store: 1221 bad_store:
1222 dm_put_device(ti, s->cow); 1222 dm_put_device(ti, s->cow);
1223 1223
1224 bad_cow: 1224 bad_cow:
1225 kfree(s); 1225 kfree(s);
1226 1226
1227 bad: 1227 bad:
1228 return r; 1228 return r;
1229 } 1229 }
1230 1230
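For context, the four arguments validated above come straight from the device-mapper table line. Hypothetical examples (paths, lengths and chunk size are illustrative only):

/*
 *   0 2097152 snapshot       /dev/vg0/base /dev/vg0/cow P 16
 *   0 2097152 snapshot-merge /dev/vg0/base /dev/vg0/cow P 16
 *
 * i.e. <start> <length> <target> <origin_dev> <COW-dev> <p/n> <chunk-size>,
 * with P/N selecting a persistent or non-persistent exception store and
 * the chunk size given in sectors.
 */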
1231 static void __free_exceptions(struct dm_snapshot *s) 1231 static void __free_exceptions(struct dm_snapshot *s)
1232 { 1232 {
1233 dm_kcopyd_client_destroy(s->kcopyd_client); 1233 dm_kcopyd_client_destroy(s->kcopyd_client);
1234 s->kcopyd_client = NULL; 1234 s->kcopyd_client = NULL;
1235 1235
1236 dm_exception_table_exit(&s->pending, pending_cache); 1236 dm_exception_table_exit(&s->pending, pending_cache);
1237 dm_exception_table_exit(&s->complete, exception_cache); 1237 dm_exception_table_exit(&s->complete, exception_cache);
1238 } 1238 }
1239 1239
1240 static void __handover_exceptions(struct dm_snapshot *snap_src, 1240 static void __handover_exceptions(struct dm_snapshot *snap_src,
1241 struct dm_snapshot *snap_dest) 1241 struct dm_snapshot *snap_dest)
1242 { 1242 {
1243 union { 1243 union {
1244 struct dm_exception_table table_swap; 1244 struct dm_exception_table table_swap;
1245 struct dm_exception_store *store_swap; 1245 struct dm_exception_store *store_swap;
1246 } u; 1246 } u;
1247 1247
1248 /* 1248 /*
1249 * Swap all snapshot context information between the two instances. 1249 * Swap all snapshot context information between the two instances.
1250 */ 1250 */
1251 u.table_swap = snap_dest->complete; 1251 u.table_swap = snap_dest->complete;
1252 snap_dest->complete = snap_src->complete; 1252 snap_dest->complete = snap_src->complete;
1253 snap_src->complete = u.table_swap; 1253 snap_src->complete = u.table_swap;
1254 1254
1255 u.store_swap = snap_dest->store; 1255 u.store_swap = snap_dest->store;
1256 snap_dest->store = snap_src->store; 1256 snap_dest->store = snap_src->store;
1257 snap_src->store = u.store_swap; 1257 snap_src->store = u.store_swap;
1258 1258
1259 snap_dest->store->snap = snap_dest; 1259 snap_dest->store->snap = snap_dest;
1260 snap_src->store->snap = snap_src; 1260 snap_src->store->snap = snap_src;
1261 1261
1262 snap_dest->ti->split_io = snap_dest->store->chunk_size; 1262 snap_dest->ti->split_io = snap_dest->store->chunk_size;
1263 snap_dest->valid = snap_src->valid; 1263 snap_dest->valid = snap_src->valid;
1264 1264
1265 /* 1265 /*
1266 * Set source invalid to ensure it receives no further I/O. 1266 * Set source invalid to ensure it receives no further I/O.
1267 */ 1267 */
1268 snap_src->valid = 0; 1268 snap_src->valid = 0;
1269 } 1269 }
1270 1270
1271 static void snapshot_dtr(struct dm_target *ti) 1271 static void snapshot_dtr(struct dm_target *ti)
1272 { 1272 {
1273 #ifdef CONFIG_DM_DEBUG 1273 #ifdef CONFIG_DM_DEBUG
1274 int i; 1274 int i;
1275 #endif 1275 #endif
1276 struct dm_snapshot *s = ti->private; 1276 struct dm_snapshot *s = ti->private;
1277 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1277 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1278 1278
1279 flush_workqueue(ksnapd); 1279 flush_workqueue(ksnapd);
1280 1280
1281 down_read(&_origins_lock); 1281 down_read(&_origins_lock);
1282 /* Check whether exception handover must be cancelled */ 1282 /* Check whether exception handover must be cancelled */
1283 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1283 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1284 if (snap_src && snap_dest && (s == snap_src)) { 1284 if (snap_src && snap_dest && (s == snap_src)) {
1285 down_write(&snap_dest->lock); 1285 down_write(&snap_dest->lock);
1286 snap_dest->valid = 0; 1286 snap_dest->valid = 0;
1287 up_write(&snap_dest->lock); 1287 up_write(&snap_dest->lock);
1288 DMERR("Cancelling snapshot handover."); 1288 DMERR("Cancelling snapshot handover.");
1289 } 1289 }
1290 up_read(&_origins_lock); 1290 up_read(&_origins_lock);
1291 1291
1292 if (dm_target_is_snapshot_merge(ti)) 1292 if (dm_target_is_snapshot_merge(ti))
1293 stop_merge(s); 1293 stop_merge(s);
1294 1294
1295 /* Prevent further origin writes from using this snapshot. */ 1295 /* Prevent further origin writes from using this snapshot. */
1296 /* After this returns there can be no new kcopyd jobs. */ 1296 /* After this returns there can be no new kcopyd jobs. */
1297 unregister_snapshot(s); 1297 unregister_snapshot(s);
1298 1298
1299 while (atomic_read(&s->pending_exceptions_count)) 1299 while (atomic_read(&s->pending_exceptions_count))
1300 msleep(1); 1300 msleep(1);
1301 /* 1301 /*
1302 * Ensure instructions in mempool_destroy aren't reordered 1302 * Ensure instructions in mempool_destroy aren't reordered
1303 * before atomic_read. 1303 * before atomic_read.
1304 */ 1304 */
1305 smp_mb(); 1305 smp_mb();
1306 1306
1307 #ifdef CONFIG_DM_DEBUG 1307 #ifdef CONFIG_DM_DEBUG
1308 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1308 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1309 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); 1309 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1310 #endif 1310 #endif
1311 1311
1312 mempool_destroy(s->tracked_chunk_pool); 1312 mempool_destroy(s->tracked_chunk_pool);
1313 1313
1314 __free_exceptions(s); 1314 __free_exceptions(s);
1315 1315
1316 mempool_destroy(s->pending_pool); 1316 mempool_destroy(s->pending_pool);
1317 1317
1318 dm_put_device(ti, s->origin); 1318 dm_put_device(ti, s->origin);
1319 1319
1320 dm_exception_store_destroy(s->store); 1320 dm_exception_store_destroy(s->store);
1321 1321
1322 dm_put_device(ti, s->cow); 1322 dm_put_device(ti, s->cow);
1323 1323
1324 kfree(s); 1324 kfree(s);
1325 } 1325 }
1326 1326
1327 /* 1327 /*
1328 * Flush a list of buffers. 1328 * Flush a list of buffers.
1329 */ 1329 */
1330 static void flush_bios(struct bio *bio) 1330 static void flush_bios(struct bio *bio)
1331 { 1331 {
1332 struct bio *n; 1332 struct bio *n;
1333 1333
1334 while (bio) { 1334 while (bio) {
1335 n = bio->bi_next; 1335 n = bio->bi_next;
1336 bio->bi_next = NULL; 1336 bio->bi_next = NULL;
1337 generic_make_request(bio); 1337 generic_make_request(bio);
1338 bio = n; 1338 bio = n;
1339 } 1339 }
1340 } 1340 }
1341 1341
1342 static void flush_queued_bios(struct work_struct *work) 1342 static void flush_queued_bios(struct work_struct *work)
1343 { 1343 {
1344 struct dm_snapshot *s = 1344 struct dm_snapshot *s =
1345 container_of(work, struct dm_snapshot, queued_bios_work); 1345 container_of(work, struct dm_snapshot, queued_bios_work);
1346 struct bio *queued_bios; 1346 struct bio *queued_bios;
1347 unsigned long flags; 1347 unsigned long flags;
1348 1348
1349 spin_lock_irqsave(&s->pe_lock, flags); 1349 spin_lock_irqsave(&s->pe_lock, flags);
1350 queued_bios = bio_list_get(&s->queued_bios); 1350 queued_bios = bio_list_get(&s->queued_bios);
1351 spin_unlock_irqrestore(&s->pe_lock, flags); 1351 spin_unlock_irqrestore(&s->pe_lock, flags);
1352 1352
1353 flush_bios(queued_bios); 1353 flush_bios(queued_bios);
1354 } 1354 }
1355 1355
1356 static int do_origin(struct dm_dev *origin, struct bio *bio); 1356 static int do_origin(struct dm_dev *origin, struct bio *bio);
1357 1357
1358 /* 1358 /*
1359 * Retry a list of origin bios. 1359 * Retry a list of origin bios.
1360 */ 1360 */
1361 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) 1361 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1362 { 1362 {
1363 struct bio *n; 1363 struct bio *n;
1364 int r; 1364 int r;
1365 1365
1366 while (bio) { 1366 while (bio) {
1367 n = bio->bi_next; 1367 n = bio->bi_next;
1368 bio->bi_next = NULL; 1368 bio->bi_next = NULL;
1369 r = do_origin(s->origin, bio); 1369 r = do_origin(s->origin, bio);
1370 if (r == DM_MAPIO_REMAPPED) 1370 if (r == DM_MAPIO_REMAPPED)
1371 generic_make_request(bio); 1371 generic_make_request(bio);
1372 bio = n; 1372 bio = n;
1373 } 1373 }
1374 } 1374 }
1375 1375
1376 /* 1376 /*
1377 * Error a list of buffers. 1377 * Error a list of buffers.
1378 */ 1378 */
1379 static void error_bios(struct bio *bio) 1379 static void error_bios(struct bio *bio)
1380 { 1380 {
1381 struct bio *n; 1381 struct bio *n;
1382 1382
1383 while (bio) { 1383 while (bio) {
1384 n = bio->bi_next; 1384 n = bio->bi_next;
1385 bio->bi_next = NULL; 1385 bio->bi_next = NULL;
1386 bio_io_error(bio); 1386 bio_io_error(bio);
1387 bio = n; 1387 bio = n;
1388 } 1388 }
1389 } 1389 }
1390 1390
1391 static void __invalidate_snapshot(struct dm_snapshot *s, int err) 1391 static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1392 { 1392 {
1393 if (!s->valid) 1393 if (!s->valid)
1394 return; 1394 return;
1395 1395
1396 if (err == -EIO) 1396 if (err == -EIO)
1397 DMERR("Invalidating snapshot: Error reading/writing."); 1397 DMERR("Invalidating snapshot: Error reading/writing.");
1398 else if (err == -ENOMEM) 1398 else if (err == -ENOMEM)
1399 DMERR("Invalidating snapshot: Unable to allocate exception."); 1399 DMERR("Invalidating snapshot: Unable to allocate exception.");
1400 1400
1401 if (s->store->type->drop_snapshot) 1401 if (s->store->type->drop_snapshot)
1402 s->store->type->drop_snapshot(s->store); 1402 s->store->type->drop_snapshot(s->store);
1403 1403
1404 s->valid = 0; 1404 s->valid = 0;
1405 1405
1406 dm_table_event(s->ti->table); 1406 dm_table_event(s->ti->table);
1407 } 1407 }
1408 1408
1409 static void pending_complete(struct dm_snap_pending_exception *pe, int success) 1409 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1410 { 1410 {
1411 struct dm_exception *e; 1411 struct dm_exception *e;
1412 struct dm_snapshot *s = pe->snap; 1412 struct dm_snapshot *s = pe->snap;
1413 struct bio *origin_bios = NULL; 1413 struct bio *origin_bios = NULL;
1414 struct bio *snapshot_bios = NULL; 1414 struct bio *snapshot_bios = NULL;
1415 int error = 0; 1415 int error = 0;
1416 1416
1417 if (!success) { 1417 if (!success) {
1418 /* Read/write error - snapshot is unusable */ 1418 /* Read/write error - snapshot is unusable */
1419 down_write(&s->lock); 1419 down_write(&s->lock);
1420 __invalidate_snapshot(s, -EIO); 1420 __invalidate_snapshot(s, -EIO);
1421 error = 1; 1421 error = 1;
1422 goto out; 1422 goto out;
1423 } 1423 }
1424 1424
1425 e = alloc_completed_exception(); 1425 e = alloc_completed_exception();
1426 if (!e) { 1426 if (!e) {
1427 down_write(&s->lock); 1427 down_write(&s->lock);
1428 __invalidate_snapshot(s, -ENOMEM); 1428 __invalidate_snapshot(s, -ENOMEM);
1429 error = 1; 1429 error = 1;
1430 goto out; 1430 goto out;
1431 } 1431 }
1432 *e = pe->e; 1432 *e = pe->e;
1433 1433
1434 down_write(&s->lock); 1434 down_write(&s->lock);
1435 if (!s->valid) { 1435 if (!s->valid) {
1436 free_completed_exception(e); 1436 free_completed_exception(e);
1437 error = 1; 1437 error = 1;
1438 goto out; 1438 goto out;
1439 } 1439 }
1440 1440
1441 /* Check for conflicting reads */ 1441 /* Check for conflicting reads */
1442 __check_for_conflicting_io(s, pe->e.old_chunk); 1442 __check_for_conflicting_io(s, pe->e.old_chunk);
1443 1443
1444 /* 1444 /*
1445 * Add a proper exception, and remove the 1445 * Add a proper exception, and remove the
1446 * in-flight exception from the list. 1446 * in-flight exception from the list.
1447 */ 1447 */
1448 dm_insert_exception(&s->complete, e); 1448 dm_insert_exception(&s->complete, e);
1449 1449
1450 out: 1450 out:
1451 dm_remove_exception(&pe->e); 1451 dm_remove_exception(&pe->e);
1452 snapshot_bios = bio_list_get(&pe->snapshot_bios); 1452 snapshot_bios = bio_list_get(&pe->snapshot_bios);
1453 origin_bios = bio_list_get(&pe->origin_bios); 1453 origin_bios = bio_list_get(&pe->origin_bios);
1454 free_pending_exception(pe); 1454 free_pending_exception(pe);
1455 1455
1456 increment_pending_exceptions_done_count(); 1456 increment_pending_exceptions_done_count();
1457 1457
1458 up_write(&s->lock); 1458 up_write(&s->lock);
1459 1459
1460 /* Submit any pending write bios */ 1460 /* Submit any pending write bios */
1461 if (error) 1461 if (error)
1462 error_bios(snapshot_bios); 1462 error_bios(snapshot_bios);
1463 else 1463 else
1464 flush_bios(snapshot_bios); 1464 flush_bios(snapshot_bios);
1465 1465
1466 retry_origin_bios(s, origin_bios); 1466 retry_origin_bios(s, origin_bios);
1467 } 1467 }
1468 1468
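The completion paths above are worth summarizing, since three different failure points funnel into the same cleanup:

/*
 * pending_complete() outcomes (summary, no new behaviour):
 *
 *   copy or commit failed      -> invalidate the snapshot, error the
 *                                 queued snapshot bios
 *   exception alloc failed, or
 *   snapshot went invalid      -> same cleanup, exception dropped
 *   success                    -> insert into the complete table, wake
 *                                 conflicting reads, flush queued bios
 *
 * In every case the queued origin bios are retried afterwards.
 */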
1469 static void commit_callback(void *context, int success) 1469 static void commit_callback(void *context, int success)
1470 { 1470 {
1471 struct dm_snap_pending_exception *pe = context; 1471 struct dm_snap_pending_exception *pe = context;
1472 1472
1473 pending_complete(pe, success); 1473 pending_complete(pe, success);
1474 } 1474 }
1475 1475
1476 /* 1476 /*
1477 * Called when the copy I/O has finished. kcopyd actually runs 1477 * Called when the copy I/O has finished. kcopyd actually runs
1478 * this code so don't block. 1478 * this code so don't block.
1479 */ 1479 */
1480 static void copy_callback(int read_err, unsigned long write_err, void *context) 1480 static void copy_callback(int read_err, unsigned long write_err, void *context)
1481 { 1481 {
1482 struct dm_snap_pending_exception *pe = context; 1482 struct dm_snap_pending_exception *pe = context;
1483 struct dm_snapshot *s = pe->snap; 1483 struct dm_snapshot *s = pe->snap;
1484 1484
1485 if (read_err || write_err) 1485 if (read_err || write_err)
1486 pending_complete(pe, 0); 1486 pending_complete(pe, 0);
1487 1487
1488 else 1488 else
1489 /* Update the metadata if we are persistent */ 1489 /* Update the metadata if we are persistent */
1490 s->store->type->commit_exception(s->store, &pe->e, 1490 s->store->type->commit_exception(s->store, &pe->e,
1491 commit_callback, pe); 1491 commit_callback, pe);
1492 } 1492 }
1493 1493
1494 /* 1494 /*
1495 * Dispatches the copy operation to kcopyd. 1495 * Dispatches the copy operation to kcopyd.
1496 */ 1496 */
1497 static void start_copy(struct dm_snap_pending_exception *pe) 1497 static void start_copy(struct dm_snap_pending_exception *pe)
1498 { 1498 {
1499 struct dm_snapshot *s = pe->snap; 1499 struct dm_snapshot *s = pe->snap;
1500 struct dm_io_region src, dest; 1500 struct dm_io_region src, dest;
1501 struct block_device *bdev = s->origin->bdev; 1501 struct block_device *bdev = s->origin->bdev;
1502 sector_t dev_size; 1502 sector_t dev_size;
1503 1503
1504 dev_size = get_dev_size(bdev); 1504 dev_size = get_dev_size(bdev);
1505 1505
1506 src.bdev = bdev; 1506 src.bdev = bdev;
1507 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); 1507 src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1508 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); 1508 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
1509 1509
1510 dest.bdev = s->cow->bdev; 1510 dest.bdev = s->cow->bdev;
1511 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); 1511 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1512 dest.count = src.count; 1512 dest.count = src.count;
1513 1513
1514 /* Hand over to kcopyd */ 1514 /* Hand over to kcopyd */
1515 dm_kcopyd_copy(s->kcopyd_client, 1515 dm_kcopyd_copy(s->kcopyd_client,
1516 &src, 1, &dest, 0, copy_callback, pe); 1516 &src, 1, &dest, 0, copy_callback, pe);
1517 } 1517 }
1518 1518
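A quick worked example of the src.count clamp above, with illustrative numbers:

/*
 * With a 16-sector chunk and a 1000-sector origin, the final chunk
 * starts at sector 992 and copies only min(16, 1000 - 992) = 8
 * sectors, so a partial trailing chunk never reads past the end of
 * the origin device.
 */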
1519 static struct dm_snap_pending_exception * 1519 static struct dm_snap_pending_exception *
1520 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) 1520 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1521 { 1521 {
1522 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); 1522 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1523 1523
1524 if (!e) 1524 if (!e)
1525 return NULL; 1525 return NULL;
1526 1526
1527 return container_of(e, struct dm_snap_pending_exception, e); 1527 return container_of(e, struct dm_snap_pending_exception, e);
1528 } 1528 }
1529 1529
1530 /* 1530 /*
1531 * Looks to see if this snapshot already has a pending exception 1531 * Looks to see if this snapshot already has a pending exception
1532 * for this chunk, otherwise it allocates a new one and inserts 1532 * for this chunk, otherwise it allocates a new one and inserts
1533 * it into the pending table. 1533 * it into the pending table.
1534 * 1534 *
1535 * NOTE: a write lock must be held on snap->lock before calling 1535 * NOTE: a write lock must be held on snap->lock before calling
1536 * this. 1536 * this.
1537 */ 1537 */
1538 static struct dm_snap_pending_exception * 1538 static struct dm_snap_pending_exception *
1539 __find_pending_exception(struct dm_snapshot *s, 1539 __find_pending_exception(struct dm_snapshot *s,
1540 struct dm_snap_pending_exception *pe, chunk_t chunk) 1540 struct dm_snap_pending_exception *pe, chunk_t chunk)
1541 { 1541 {
1542 struct dm_snap_pending_exception *pe2; 1542 struct dm_snap_pending_exception *pe2;
1543 1543
1544 pe2 = __lookup_pending_exception(s, chunk); 1544 pe2 = __lookup_pending_exception(s, chunk);
1545 if (pe2) { 1545 if (pe2) {
1546 free_pending_exception(pe); 1546 free_pending_exception(pe);
1547 return pe2; 1547 return pe2;
1548 } 1548 }
1549 1549
1550 pe->e.old_chunk = chunk; 1550 pe->e.old_chunk = chunk;
1551 bio_list_init(&pe->origin_bios); 1551 bio_list_init(&pe->origin_bios);
1552 bio_list_init(&pe->snapshot_bios); 1552 bio_list_init(&pe->snapshot_bios);
1553 pe->started = 0; 1553 pe->started = 0;
1554 1554
1555 if (s->store->type->prepare_exception(s->store, &pe->e)) { 1555 if (s->store->type->prepare_exception(s->store, &pe->e)) {
1556 free_pending_exception(pe); 1556 free_pending_exception(pe);
1557 return NULL; 1557 return NULL;
1558 } 1558 }
1559 1559
1560 dm_insert_exception(&s->pending, &pe->e); 1560 dm_insert_exception(&s->pending, &pe->e);
1561 1561
1562 return pe; 1562 return pe;
1563 } 1563 }
1564 1564
1565 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, 1565 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1566 struct bio *bio, chunk_t chunk) 1566 struct bio *bio, chunk_t chunk)
1567 { 1567 {
1568 bio->bi_bdev = s->cow->bdev; 1568 bio->bi_bdev = s->cow->bdev;
1569 bio->bi_sector = chunk_to_sector(s->store, 1569 bio->bi_sector = chunk_to_sector(s->store,
1570 dm_chunk_number(e->new_chunk) + 1570 dm_chunk_number(e->new_chunk) +
1571 (chunk - e->old_chunk)) + 1571 (chunk - e->old_chunk)) +
1572 (bio->bi_sector & 1572 (bio->bi_sector &
1573 s->store->chunk_mask); 1573 s->store->chunk_mask);
1574 } 1574 }
1575 1575
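The sector arithmetic in remap_exception() is easier to follow with concrete numbers. A self-contained sketch, assuming a 16-sector chunk (chunk_shift == 4) and ignoring the consecutive-chunk count that dm_chunk_number() masks off; all values are illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t chunk_shift = 4, chunk_mask = 15;	/* 16-sector chunks */
	uint64_t old_chunk = 100, new_chunk = 7;	/* exception: 100 -> COW 7 */
	uint64_t bi_sector = 1605;			/* falls inside chunk 100 */

	uint64_t chunk = bi_sector >> chunk_shift;	/* sector_to_chunk: 100 */
	uint64_t cow_sector =
		((new_chunk + (chunk - old_chunk)) << chunk_shift) +
		(bi_sector & chunk_mask);		/* 7*16 + 5 = 117 */

	printf("origin sector %llu -> COW sector %llu\n",
	       (unsigned long long)bi_sector,
	       (unsigned long long)cow_sector);
	return 0;
}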
1576 static int snapshot_map(struct dm_target *ti, struct bio *bio, 1576 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1577 union map_info *map_context) 1577 union map_info *map_context)
1578 { 1578 {
1579 struct dm_exception *e; 1579 struct dm_exception *e;
1580 struct dm_snapshot *s = ti->private; 1580 struct dm_snapshot *s = ti->private;
1581 int r = DM_MAPIO_REMAPPED; 1581 int r = DM_MAPIO_REMAPPED;
1582 chunk_t chunk; 1582 chunk_t chunk;
1583 struct dm_snap_pending_exception *pe = NULL; 1583 struct dm_snap_pending_exception *pe = NULL;
1584 1584
1585 if (unlikely(bio_empty_barrier(bio))) { 1585 if (unlikely(bio_empty_barrier(bio))) {
1586 bio->bi_bdev = s->cow->bdev; 1586 bio->bi_bdev = s->cow->bdev;
1587 return DM_MAPIO_REMAPPED; 1587 return DM_MAPIO_REMAPPED;
1588 } 1588 }
1589 1589
1590 chunk = sector_to_chunk(s->store, bio->bi_sector); 1590 chunk = sector_to_chunk(s->store, bio->bi_sector);
1591 1591
1592 /* Full snapshots are not usable */ 1592 /* Full snapshots are not usable */
1593 /* To get here the table must be live so s->active is always set. */ 1593 /* To get here the table must be live so s->active is always set. */
1594 if (!s->valid) 1594 if (!s->valid)
1595 return -EIO; 1595 return -EIO;
1596 1596
1597 /* FIXME: should only take write lock if we need 1597 /* FIXME: should only take write lock if we need
1598 * to copy an exception */ 1598 * to copy an exception */
1599 down_write(&s->lock); 1599 down_write(&s->lock);
1600 1600
1601 if (!s->valid) { 1601 if (!s->valid) {
1602 r = -EIO; 1602 r = -EIO;
1603 goto out_unlock; 1603 goto out_unlock;
1604 } 1604 }
1605 1605
1606 /* If the block is already remapped - use that, else remap it */ 1606 /* If the block is already remapped - use that, else remap it */
1607 e = dm_lookup_exception(&s->complete, chunk); 1607 e = dm_lookup_exception(&s->complete, chunk);
1608 if (e) { 1608 if (e) {
1609 remap_exception(s, e, bio, chunk); 1609 remap_exception(s, e, bio, chunk);
1610 goto out_unlock; 1610 goto out_unlock;
1611 } 1611 }
1612 1612
1613 /* 1613 /*
1614 * Write to snapshot - higher level takes care of RW/RO 1614 * Write to snapshot - higher level takes care of RW/RO
1615 * flags so we should only get this if we are 1615 * flags so we should only get this if we are
1616 * writeable. 1616 * writeable.
1617 */ 1617 */
1618 if (bio_rw(bio) == WRITE) { 1618 if (bio_rw(bio) == WRITE) {
1619 pe = __lookup_pending_exception(s, chunk); 1619 pe = __lookup_pending_exception(s, chunk);
1620 if (!pe) { 1620 if (!pe) {
1621 up_write(&s->lock); 1621 up_write(&s->lock);
1622 pe = alloc_pending_exception(s); 1622 pe = alloc_pending_exception(s);
1623 down_write(&s->lock); 1623 down_write(&s->lock);
1624 1624
1625 if (!s->valid) { 1625 if (!s->valid) {
1626 free_pending_exception(pe); 1626 free_pending_exception(pe);
1627 r = -EIO; 1627 r = -EIO;
1628 goto out_unlock; 1628 goto out_unlock;
1629 } 1629 }
1630 1630
1631 e = dm_lookup_exception(&s->complete, chunk); 1631 e = dm_lookup_exception(&s->complete, chunk);
1632 if (e) { 1632 if (e) {
1633 free_pending_exception(pe); 1633 free_pending_exception(pe);
1634 remap_exception(s, e, bio, chunk); 1634 remap_exception(s, e, bio, chunk);
1635 goto out_unlock; 1635 goto out_unlock;
1636 } 1636 }
1637 1637
1638 pe = __find_pending_exception(s, pe, chunk); 1638 pe = __find_pending_exception(s, pe, chunk);
1639 if (!pe) { 1639 if (!pe) {
1640 __invalidate_snapshot(s, -ENOMEM); 1640 __invalidate_snapshot(s, -ENOMEM);
1641 r = -EIO; 1641 r = -EIO;
1642 goto out_unlock; 1642 goto out_unlock;
1643 } 1643 }
1644 } 1644 }
1645 1645
1646 remap_exception(s, &pe->e, bio, chunk); 1646 remap_exception(s, &pe->e, bio, chunk);
1647 bio_list_add(&pe->snapshot_bios, bio); 1647 bio_list_add(&pe->snapshot_bios, bio);
1648 1648
1649 r = DM_MAPIO_SUBMITTED; 1649 r = DM_MAPIO_SUBMITTED;
1650 1650
1651 if (!pe->started) { 1651 if (!pe->started) {
1652 /* this is protected by snap->lock */ 1652 /* this is protected by snap->lock */
1653 pe->started = 1; 1653 pe->started = 1;
1654 up_write(&s->lock); 1654 up_write(&s->lock);
1655 start_copy(pe); 1655 start_copy(pe);
1656 goto out; 1656 goto out;
1657 } 1657 }
1658 } else { 1658 } else {
1659 bio->bi_bdev = s->origin->bdev; 1659 bio->bi_bdev = s->origin->bdev;
1660 map_context->ptr = track_chunk(s, chunk); 1660 map_context->ptr = track_chunk(s, chunk);
1661 } 1661 }
1662 1662
1663 out_unlock: 1663 out_unlock:
1664 up_write(&s->lock); 1664 up_write(&s->lock);
1665 out: 1665 out:
1666 return r; 1666 return r;
1667 } 1667 }
1668 1668
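One subtlety in the write path above deserves a note:

/*
 * alloc_pending_exception() may sleep, so snapshot_map() drops s->lock
 * around it and then repeats the validity check and the completed- and
 * pending-exception lookups: another thread may have invalidated the
 * snapshot or completed the chunk in that window.
 */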
1669 /* 1669 /*
1670 * A snapshot-merge target behaves like a combination of a snapshot 1670 * A snapshot-merge target behaves like a combination of a snapshot
1671 * target and a snapshot-origin target. It only generates new 1671 * target and a snapshot-origin target. It only generates new
1672 * exceptions in other snapshots and not in the one that is being 1672 * exceptions in other snapshots and not in the one that is being
1673 * merged. 1673 * merged.
1674 * 1674 *
1675 * For each chunk, if there is an existing exception, it is used to 1675 * For each chunk, if there is an existing exception, it is used to
1676 * redirect I/O to the cow device. Otherwise I/O is sent to the origin, 1676 * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
1677 * which in turn might generate exceptions in other snapshots. 1677 * which in turn might generate exceptions in other snapshots.
1678 * If merging is currently taking place on the chunk in question, the 1678 * If merging is currently taking place on the chunk in question, the
1679 * I/O is deferred by adding it to s->bios_queued_during_merge. 1679 * I/O is deferred by adding it to s->bios_queued_during_merge.
1680 */ 1680 */
1681 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, 1681 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1682 union map_info *map_context) 1682 union map_info *map_context)
1683 { 1683 {
1684 struct dm_exception *e; 1684 struct dm_exception *e;
1685 struct dm_snapshot *s = ti->private; 1685 struct dm_snapshot *s = ti->private;
1686 int r = DM_MAPIO_REMAPPED; 1686 int r = DM_MAPIO_REMAPPED;
1687 chunk_t chunk; 1687 chunk_t chunk;
1688 1688
1689 if (unlikely(bio_empty_barrier(bio))) { 1689 if (unlikely(bio_empty_barrier(bio))) {
1690 if (!map_context->flush_request) 1690 if (!map_context->flush_request)
1691 bio->bi_bdev = s->origin->bdev; 1691 bio->bi_bdev = s->origin->bdev;
1692 else 1692 else
1693 bio->bi_bdev = s->cow->bdev; 1693 bio->bi_bdev = s->cow->bdev;
1694 map_context->ptr = NULL; 1694 map_context->ptr = NULL;
1695 return DM_MAPIO_REMAPPED; 1695 return DM_MAPIO_REMAPPED;
1696 } 1696 }
1697 1697
1698 chunk = sector_to_chunk(s->store, bio->bi_sector); 1698 chunk = sector_to_chunk(s->store, bio->bi_sector);
1699 1699
1700 down_write(&s->lock); 1700 down_write(&s->lock);
1701 1701
1702 /* Full snapshots are not usable */ 1702 /* Full merging snapshots are redirected to the origin */
1703 if (!s->valid) { 1703 if (!s->valid)
1704 r = -EIO; 1704 goto redirect_to_origin;
1705 goto out_unlock;
1706 }
1707 1705
1708 /* If the block is already remapped - use that */ 1706 /* If the block is already remapped - use that */
1709 e = dm_lookup_exception(&s->complete, chunk); 1707 e = dm_lookup_exception(&s->complete, chunk);
1710 if (e) { 1708 if (e) {
1711 /* Queue writes overlapping with chunks being merged */ 1709 /* Queue writes overlapping with chunks being merged */
1712 if (bio_rw(bio) == WRITE && 1710 if (bio_rw(bio) == WRITE &&
1713 chunk >= s->first_merging_chunk && 1711 chunk >= s->first_merging_chunk &&
1714 chunk < (s->first_merging_chunk + 1712 chunk < (s->first_merging_chunk +
1715 s->num_merging_chunks)) { 1713 s->num_merging_chunks)) {
1716 bio->bi_bdev = s->origin->bdev; 1714 bio->bi_bdev = s->origin->bdev;
1717 bio_list_add(&s->bios_queued_during_merge, bio); 1715 bio_list_add(&s->bios_queued_during_merge, bio);
1718 r = DM_MAPIO_SUBMITTED; 1716 r = DM_MAPIO_SUBMITTED;
1719 goto out_unlock; 1717 goto out_unlock;
1720 } 1718 }
1721 1719
1722 remap_exception(s, e, bio, chunk); 1720 remap_exception(s, e, bio, chunk);
1723 1721
1724 if (bio_rw(bio) == WRITE) 1722 if (bio_rw(bio) == WRITE)
1725 map_context->ptr = track_chunk(s, chunk); 1723 map_context->ptr = track_chunk(s, chunk);
1726 goto out_unlock; 1724 goto out_unlock;
1727 } 1725 }
1728 1726
1727 redirect_to_origin:
1729 bio->bi_bdev = s->origin->bdev; 1728 bio->bi_bdev = s->origin->bdev;
1730 1729
1731 if (bio_rw(bio) == WRITE) { 1730 if (bio_rw(bio) == WRITE) {
1732 up_write(&s->lock); 1731 up_write(&s->lock);
1733 return do_origin(s->origin, bio); 1732 return do_origin(s->origin, bio);
1734 } 1733 }
1735 1734
1736 out_unlock: 1735 out_unlock:
1737 up_write(&s->lock); 1736 up_write(&s->lock);
1738 1737
1739 return r; 1738 return r;
1740 } 1739 }
1741 1740
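After this change, the routing performed by snapshot_merge_map() can be summarized as follows (a paraphrase, not new behaviour):

/*
 *   invalid snapshot                 -> fall through to the origin path
 *   exception, WRITE overlapping the
 *   chunks currently being merged    -> queued until the merge completes
 *   exception                        -> remapped to the COW device
 *                                       (WRITEs tracked per chunk)
 *   no exception, WRITE              -> do_origin(), which may trigger
 *                                       exceptions in other snapshots
 *   no exception, READ               -> remapped straight to the origin
 */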
1742 static int snapshot_end_io(struct dm_target *ti, struct bio *bio, 1741 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1743 int error, union map_info *map_context) 1742 int error, union map_info *map_context)
1744 { 1743 {
1745 struct dm_snapshot *s = ti->private; 1744 struct dm_snapshot *s = ti->private;
1746 struct dm_snap_tracked_chunk *c = map_context->ptr; 1745 struct dm_snap_tracked_chunk *c = map_context->ptr;
1747 1746
1748 if (c) 1747 if (c)
1749 stop_tracking_chunk(s, c); 1748 stop_tracking_chunk(s, c);
1750 1749
1751 return 0; 1750 return 0;
1752 } 1751 }
1753 1752
1754 static void snapshot_merge_presuspend(struct dm_target *ti) 1753 static void snapshot_merge_presuspend(struct dm_target *ti)
1755 { 1754 {
1756 struct dm_snapshot *s = ti->private; 1755 struct dm_snapshot *s = ti->private;
1757 1756
1758 stop_merge(s); 1757 stop_merge(s);
1759 } 1758 }
1760 1759
1761 static void snapshot_postsuspend(struct dm_target *ti) 1760 static void snapshot_postsuspend(struct dm_target *ti)
1762 { 1761 {
1763 struct dm_snapshot *s = ti->private; 1762 struct dm_snapshot *s = ti->private;
1764 1763
1765 down_write(&s->lock); 1764 down_write(&s->lock);
1766 s->suspended = 1; 1765 s->suspended = 1;
1767 up_write(&s->lock); 1766 up_write(&s->lock);
1768 } 1767 }
1769 1768
1770 static int snapshot_preresume(struct dm_target *ti) 1769 static int snapshot_preresume(struct dm_target *ti)
1771 { 1770 {
1772 int r = 0; 1771 int r = 0;
1773 struct dm_snapshot *s = ti->private; 1772 struct dm_snapshot *s = ti->private;
1774 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1773 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1775 1774
1776 down_read(&_origins_lock); 1775 down_read(&_origins_lock);
1777 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1776 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1778 if (snap_src && snap_dest) { 1777 if (snap_src && snap_dest) {
1779 down_read(&snap_src->lock); 1778 down_read(&snap_src->lock);
1780 if (s == snap_src) { 1779 if (s == snap_src) {
1781 DMERR("Unable to resume snapshot source until " 1780 DMERR("Unable to resume snapshot source until "
1782 "handover completes."); 1781 "handover completes.");
1783 r = -EINVAL; 1782 r = -EINVAL;
1784 } else if (!snap_src->suspended) { 1783 } else if (!snap_src->suspended) {
1785 DMERR("Unable to perform snapshot handover until " 1784 DMERR("Unable to perform snapshot handover until "
1786 "source is suspended."); 1785 "source is suspended.");
1787 r = -EINVAL; 1786 r = -EINVAL;
1788 } 1787 }
1789 up_read(&snap_src->lock); 1788 up_read(&snap_src->lock);
1790 } 1789 }
1791 up_read(&_origins_lock); 1790 up_read(&_origins_lock);
1792 1791
1793 return r; 1792 return r;
1794 } 1793 }
1795 1794
1796 static void snapshot_resume(struct dm_target *ti) 1795 static void snapshot_resume(struct dm_target *ti)
1797 { 1796 {
1798 struct dm_snapshot *s = ti->private; 1797 struct dm_snapshot *s = ti->private;
1799 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1798 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1800 1799
1801 down_read(&_origins_lock); 1800 down_read(&_origins_lock);
1802 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1801 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1803 if (snap_src && snap_dest) { 1802 if (snap_src && snap_dest) {
1804 down_write(&snap_src->lock); 1803 down_write(&snap_src->lock);
1805 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); 1804 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1806 __handover_exceptions(snap_src, snap_dest); 1805 __handover_exceptions(snap_src, snap_dest);
1807 up_write(&snap_dest->lock); 1806 up_write(&snap_dest->lock);
1808 up_write(&snap_src->lock); 1807 up_write(&snap_src->lock);
1809 } 1808 }
1810 up_read(&_origins_lock); 1809 up_read(&_origins_lock);
1811 1810
1812 /* Now we have correct chunk size, reregister */ 1811 /* Now we have correct chunk size, reregister */
1813 reregister_snapshot(s); 1812 reregister_snapshot(s);
1814 1813
1815 down_write(&s->lock); 1814 down_write(&s->lock);
1816 s->active = 1; 1815 s->active = 1;
1817 s->suspended = 0; 1816 s->suspended = 0;
1818 up_write(&s->lock); 1817 up_write(&s->lock);
1819 } 1818 }
1820 1819
1821 static sector_t get_origin_minimum_chunksize(struct block_device *bdev) 1820 static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1822 { 1821 {
1823 sector_t min_chunksize; 1822 sector_t min_chunksize;
1824 1823
1825 down_read(&_origins_lock); 1824 down_read(&_origins_lock);
1826 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev)); 1825 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1827 up_read(&_origins_lock); 1826 up_read(&_origins_lock);
1828 1827
1829 return min_chunksize; 1828 return min_chunksize;
1830 } 1829 }
1831 1830
1832 static void snapshot_merge_resume(struct dm_target *ti) 1831 static void snapshot_merge_resume(struct dm_target *ti)
1833 { 1832 {
1834 struct dm_snapshot *s = ti->private; 1833 struct dm_snapshot *s = ti->private;
1835 1834
1836 /* 1835 /*
1837 * Handover exceptions from existing snapshot. 1836 * Handover exceptions from existing snapshot.
1838 */ 1837 */
1839 snapshot_resume(ti); 1838 snapshot_resume(ti);
1840 1839
1841 /* 1840 /*
1842 * snapshot-merge acts as an origin, so set ti->split_io 1841 * snapshot-merge acts as an origin, so set ti->split_io
1843 */ 1842 */
1844 ti->split_io = get_origin_minimum_chunksize(s->origin->bdev); 1843 ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1845 1844
1846 start_merge(s); 1845 start_merge(s);
1847 } 1846 }
1848 1847
1849 static int snapshot_status(struct dm_target *ti, status_type_t type, 1848 static int snapshot_status(struct dm_target *ti, status_type_t type,
1850 char *result, unsigned int maxlen) 1849 char *result, unsigned int maxlen)
1851 { 1850 {
1852 unsigned sz = 0; 1851 unsigned sz = 0;
1853 struct dm_snapshot *snap = ti->private; 1852 struct dm_snapshot *snap = ti->private;
1854 1853
1855 switch (type) { 1854 switch (type) {
1856 case STATUSTYPE_INFO: 1855 case STATUSTYPE_INFO:
1857 1856
1858 down_write(&snap->lock); 1857 down_write(&snap->lock);
1859 1858
1860 if (!snap->valid) 1859 if (!snap->valid)
1861 DMEMIT("Invalid"); 1860 DMEMIT("Invalid");
1862 else if (snap->merge_failed) 1861 else if (snap->merge_failed)
1863 DMEMIT("Merge failed"); 1862 DMEMIT("Merge failed");
1864 else { 1863 else {
1865 if (snap->store->type->usage) { 1864 if (snap->store->type->usage) {
1866 sector_t total_sectors, sectors_allocated, 1865 sector_t total_sectors, sectors_allocated,
1867 metadata_sectors; 1866 metadata_sectors;
1868 snap->store->type->usage(snap->store, 1867 snap->store->type->usage(snap->store,
1869 &total_sectors, 1868 &total_sectors,
1870 &sectors_allocated, 1869 &sectors_allocated,
1871 &metadata_sectors); 1870 &metadata_sectors);
1872 DMEMIT("%llu/%llu %llu", 1871 DMEMIT("%llu/%llu %llu",
1873 (unsigned long long)sectors_allocated, 1872 (unsigned long long)sectors_allocated,
1874 (unsigned long long)total_sectors, 1873 (unsigned long long)total_sectors,
1875 (unsigned long long)metadata_sectors); 1874 (unsigned long long)metadata_sectors);
1876 } 1875 }
1877 else 1876 else
1878 DMEMIT("Unknown"); 1877 DMEMIT("Unknown");
1879 } 1878 }
1880 1879
1881 up_write(&snap->lock); 1880 up_write(&snap->lock);
1882 1881
1883 break; 1882 break;
1884 1883
1885 case STATUSTYPE_TABLE: 1884 case STATUSTYPE_TABLE:
1886 /* 1885 /*
1887 * kdevname returns a static pointer so we need 1886 * kdevname returns a static pointer so we need
1888 * to make private copies if the output is to 1887 * to make private copies if the output is to
1889 * make sense. 1888 * make sense.
1890 */ 1889 */
1891 DMEMIT("%s %s", snap->origin->name, snap->cow->name); 1890 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1892 snap->store->type->status(snap->store, type, result + sz, 1891 snap->store->type->status(snap->store, type, result + sz,
1893 maxlen - sz); 1892 maxlen - sz);
1894 break; 1893 break;
1895 } 1894 }
1896 1895
1897 return 0; 1896 return 0;
1898 } 1897 }
1899 1898
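The STATUSTYPE_INFO branch reports exception-store usage in sectors. Illustrative outputs (values invented for the example):

/*
 *   "16384/1048576 24"   - 16384 of 1048576 COW sectors allocated,
 *                          24 of those used for metadata
 *   "Invalid"            - the snapshot was invalidated (e.g. COW full)
 *   "Merge failed"       - the merge was shut down after an error
 *   "Unknown"            - the store type provides no usage method
 */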
1900 static int snapshot_iterate_devices(struct dm_target *ti, 1899 static int snapshot_iterate_devices(struct dm_target *ti,
1901 iterate_devices_callout_fn fn, void *data) 1900 iterate_devices_callout_fn fn, void *data)
1902 { 1901 {
1903 struct dm_snapshot *snap = ti->private; 1902 struct dm_snapshot *snap = ti->private;
1904 1903
1905 return fn(ti, snap->origin, 0, ti->len, data); 1904 return fn(ti, snap->origin, 0, ti->len, data);
1906 } 1905 }
1907 1906
1908 1907
1909 /*----------------------------------------------------------------- 1908 /*-----------------------------------------------------------------
1910 * Origin methods 1909 * Origin methods
1911 *---------------------------------------------------------------*/ 1910 *---------------------------------------------------------------*/
1912 1911
1913 /* 1912 /*
1914 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any 1913 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
1915 * supplied bio was ignored. The caller may submit it immediately. 1914 * supplied bio was ignored. The caller may submit it immediately.
1916 * (No remapping actually occurs as the origin is always a direct linear 1915 * (No remapping actually occurs as the origin is always a direct linear
1917 * map.) 1916 * map.)
1918 * 1917 *
1919 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned 1918 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1920 * and any supplied bio is added to a list to be submitted once all 1919 * and any supplied bio is added to a list to be submitted once all
1921 * the necessary exceptions exist. 1920 * the necessary exceptions exist.
1922 */ 1921 */
1923 static int __origin_write(struct list_head *snapshots, sector_t sector, 1922 static int __origin_write(struct list_head *snapshots, sector_t sector,
1924 struct bio *bio) 1923 struct bio *bio)
1925 { 1924 {
1926 int r = DM_MAPIO_REMAPPED; 1925 int r = DM_MAPIO_REMAPPED;
1927 struct dm_snapshot *snap; 1926 struct dm_snapshot *snap;
1928 struct dm_exception *e; 1927 struct dm_exception *e;
1929 struct dm_snap_pending_exception *pe; 1928 struct dm_snap_pending_exception *pe;
1930 struct dm_snap_pending_exception *pe_to_start_now = NULL; 1929 struct dm_snap_pending_exception *pe_to_start_now = NULL;
1931 struct dm_snap_pending_exception *pe_to_start_last = NULL; 1930 struct dm_snap_pending_exception *pe_to_start_last = NULL;
1932 chunk_t chunk; 1931 chunk_t chunk;
1933 1932
1934 /* Do all the snapshots on this origin */ 1933 /* Do all the snapshots on this origin */
1935 list_for_each_entry (snap, snapshots, list) { 1934 list_for_each_entry (snap, snapshots, list) {
1936 /* 1935 /*
1937 * Don't make new exceptions in a merging snapshot 1936 * Don't make new exceptions in a merging snapshot
1938 * because it has effectively been deleted 1937 * because it has effectively been deleted
1939 */ 1938 */
1940 if (dm_target_is_snapshot_merge(snap->ti)) 1939 if (dm_target_is_snapshot_merge(snap->ti))
1941 continue; 1940 continue;
1942 1941
1943 down_write(&snap->lock); 1942 down_write(&snap->lock);
1944 1943
1945 /* Only deal with valid and active snapshots */ 1944 /* Only deal with valid and active snapshots */
1946 if (!snap->valid || !snap->active) 1945 if (!snap->valid || !snap->active)
1947 goto next_snapshot; 1946 goto next_snapshot;
1948 1947
1949 /* Nothing to do if writing beyond end of snapshot */ 1948 /* Nothing to do if writing beyond end of snapshot */
1950 if (sector >= dm_table_get_size(snap->ti->table)) 1949 if (sector >= dm_table_get_size(snap->ti->table))
1951 goto next_snapshot; 1950 goto next_snapshot;
1952 1951
1953 /* 1952 /*
1954 * Remember, different snapshots can have 1953 * Remember, different snapshots can have
1955 * different chunk sizes. 1954 * different chunk sizes.
1956 */ 1955 */
1957 chunk = sector_to_chunk(snap->store, sector); 1956 chunk = sector_to_chunk(snap->store, sector);
1958 1957
1959 /* 1958 /*
1960 * Check exception table to see if block 1959 * Check exception table to see if block
1961 * is already remapped in this snapshot 1960 * is already remapped in this snapshot
1962 * and trigger an exception if not. 1961 * and trigger an exception if not.
1963 */ 1962 */
1964 e = dm_lookup_exception(&snap->complete, chunk); 1963 e = dm_lookup_exception(&snap->complete, chunk);
1965 if (e) 1964 if (e)
1966 goto next_snapshot; 1965 goto next_snapshot;
1967 1966
1968 pe = __lookup_pending_exception(snap, chunk); 1967 pe = __lookup_pending_exception(snap, chunk);
1969 if (!pe) { 1968 if (!pe) {
1970 up_write(&snap->lock); 1969 up_write(&snap->lock);
1971 pe = alloc_pending_exception(snap); 1970 pe = alloc_pending_exception(snap);
1972 down_write(&snap->lock); 1971 down_write(&snap->lock);
1973 1972
1974 if (!snap->valid) { 1973 if (!snap->valid) {
1975 free_pending_exception(pe); 1974 free_pending_exception(pe);
1976 goto next_snapshot; 1975 goto next_snapshot;
1977 } 1976 }
1978 1977
1979 e = dm_lookup_exception(&snap->complete, chunk); 1978 e = dm_lookup_exception(&snap->complete, chunk);
1980 if (e) { 1979 if (e) {
1981 free_pending_exception(pe); 1980 free_pending_exception(pe);
1982 goto next_snapshot; 1981 goto next_snapshot;
1983 } 1982 }
1984 1983
1985 pe = __find_pending_exception(snap, pe, chunk); 1984 pe = __find_pending_exception(snap, pe, chunk);
1986 if (!pe) { 1985 if (!pe) {
1987 __invalidate_snapshot(snap, -ENOMEM); 1986 __invalidate_snapshot(snap, -ENOMEM);
1988 goto next_snapshot; 1987 goto next_snapshot;
1989 } 1988 }
1990 } 1989 }
1991 1990
1992 r = DM_MAPIO_SUBMITTED; 1991 r = DM_MAPIO_SUBMITTED;
1993 1992
1994 /* 1993 /*
1995 * If an origin bio was supplied, queue it to wait for the 1994 * If an origin bio was supplied, queue it to wait for the
1996 * completion of this exception, and start this one last, 1995 * completion of this exception, and start this one last,
1997 * at the end of the function. 1996 * at the end of the function.
1998 */ 1997 */
1999 if (bio) { 1998 if (bio) {
2000 bio_list_add(&pe->origin_bios, bio); 1999 bio_list_add(&pe->origin_bios, bio);
2001 bio = NULL; 2000 bio = NULL;
2002 2001
2003 if (!pe->started) { 2002 if (!pe->started) {
2004 pe->started = 1; 2003 pe->started = 1;
2005 pe_to_start_last = pe; 2004 pe_to_start_last = pe;
2006 } 2005 }
2007 } 2006 }
2008 2007
2009 if (!pe->started) { 2008 if (!pe->started) {
2010 pe->started = 1; 2009 pe->started = 1;
2011 pe_to_start_now = pe; 2010 pe_to_start_now = pe;
2012 } 2011 }
2013 2012
2014 next_snapshot: 2013 next_snapshot:
2015 up_write(&snap->lock); 2014 up_write(&snap->lock);
2016 2015
2017 if (pe_to_start_now) { 2016 if (pe_to_start_now) {
2018 start_copy(pe_to_start_now); 2017 start_copy(pe_to_start_now);
2019 pe_to_start_now = NULL; 2018 pe_to_start_now = NULL;
2020 } 2019 }
2021 } 2020 }
2022 2021
2023 /* 2022 /*
2024 * Submit the exception against which the bio is queued last, 2023 * Submit the exception against which the bio is queued last,
2025 * to give the other exceptions a head start. 2024 * to give the other exceptions a head start.
2026 */ 2025 */
2027 if (pe_to_start_last) 2026 if (pe_to_start_last)
2028 start_copy(pe_to_start_last); 2027 start_copy(pe_to_start_last);
2029 2028
2030 return r; 2029 return r;
2031 } 2030 }
2032 2031
2033 /* 2032 /*
2034 * Called on a write from the origin driver. 2033 * Called on a write from the origin driver.
2035 */ 2034 */
2036 static int do_origin(struct dm_dev *origin, struct bio *bio) 2035 static int do_origin(struct dm_dev *origin, struct bio *bio)
2037 { 2036 {
2038 struct origin *o; 2037 struct origin *o;
2039 int r = DM_MAPIO_REMAPPED; 2038 int r = DM_MAPIO_REMAPPED;
2040 2039
2041 down_read(&_origins_lock); 2040 down_read(&_origins_lock);
2042 o = __lookup_origin(origin->bdev); 2041 o = __lookup_origin(origin->bdev);
2043 if (o) 2042 if (o)
2044 r = __origin_write(&o->snapshots, bio->bi_sector, bio); 2043 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
2045 up_read(&_origins_lock); 2044 up_read(&_origins_lock);
2046 2045
2047 return r; 2046 return r;
2048 } 2047 }
2049 2048
2050 /* 2049 /*
2051 * Trigger exceptions in all non-merging snapshots. 2050 * Trigger exceptions in all non-merging snapshots.
2052 * 2051 *
2053 * The chunk size of the merging snapshot may be larger than the chunk 2052 * The chunk size of the merging snapshot may be larger than the chunk
2054 * size of some other snapshot so we may need to reallocate multiple 2053 * size of some other snapshot so we may need to reallocate multiple
2055 * chunks in other snapshots. 2054 * chunks in other snapshots.
2056 * 2055 *
2057 * We scan all the overlapping exceptions in the other snapshots. 2056 * We scan all the overlapping exceptions in the other snapshots.
2058 * Returns 1 if anything was reallocated and must be waited for, 2057 * Returns 1 if anything was reallocated and must be waited for,
2059 * otherwise returns 0. 2058 * otherwise returns 0.
2060 * 2059 *
2061 * size must be a multiple of merging_snap's chunk_size. 2060 * size must be a multiple of merging_snap's chunk_size.
2062 */ 2061 */
2063 static int origin_write_extent(struct dm_snapshot *merging_snap, 2062 static int origin_write_extent(struct dm_snapshot *merging_snap,
2064 sector_t sector, unsigned size) 2063 sector_t sector, unsigned size)
2065 { 2064 {
2066 int must_wait = 0; 2065 int must_wait = 0;
2067 sector_t n; 2066 sector_t n;
2068 struct origin *o; 2067 struct origin *o;
2069 2068
2070 /* 2069 /*
2071 * The origin's __minimum_chunk_size() got stored in split_io 2070 * The origin's __minimum_chunk_size() got stored in split_io
2072 * by snapshot_merge_resume(). 2071 * by snapshot_merge_resume().
2073 */ 2072 */
2074 down_read(&_origins_lock); 2073 down_read(&_origins_lock);
2075 o = __lookup_origin(merging_snap->origin->bdev); 2074 o = __lookup_origin(merging_snap->origin->bdev);
2076 for (n = 0; n < size; n += merging_snap->ti->split_io) 2075 for (n = 0; n < size; n += merging_snap->ti->split_io)
2077 if (__origin_write(&o->snapshots, sector + n, NULL) == 2076 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2078 DM_MAPIO_SUBMITTED) 2077 DM_MAPIO_SUBMITTED)
2079 must_wait = 1; 2078 must_wait = 1;
2080 up_read(&_origins_lock); 2079 up_read(&_origins_lock);
2081 2080
2082 return must_wait; 2081 return must_wait;
2083 } 2082 }
2084 2083
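The stride loop above visits every minimum-chunk-sized piece of the extent. A tiny runnable illustration with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 128, size = 64, split_io = 16, n;

	/* mirrors: for (n = 0; n < size; n += merging_snap->ti->split_io) */
	for (n = 0; n < size; n += split_io)
		printf("__origin_write(sector %llu)\n", sector + n);
	/* prints sectors 128, 144, 160, 176 */
	return 0;
}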
2085 /* 2084 /*
2086 * Origin: maps a linear range of a device, with hooks for snapshotting. 2085 * Origin: maps a linear range of a device, with hooks for snapshotting.
2087 */ 2086 */
2088 2087
2089 /* 2088 /*
2090 * Construct an origin mapping: <dev_path> 2089 * Construct an origin mapping: <dev_path>
2091 * The context for an origin is merely a 'struct dm_dev *' 2090 * The context for an origin is merely a 'struct dm_dev *'
2092 * pointing to the real device. 2091 * pointing to the real device.
2093 */ 2092 */
2094 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) 2093 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2095 { 2094 {
2096 int r; 2095 int r;
2097 struct dm_dev *dev; 2096 struct dm_dev *dev;
2098 2097
2099 if (argc != 1) { 2098 if (argc != 1) {
2100 ti->error = "origin: incorrect number of arguments"; 2099 ti->error = "origin: incorrect number of arguments";
2101 return -EINVAL; 2100 return -EINVAL;
2102 } 2101 }
2103 2102
2104 r = dm_get_device(ti, argv[0], 0, ti->len, 2103 r = dm_get_device(ti, argv[0], 0, ti->len,
2105 dm_table_get_mode(ti->table), &dev); 2104 dm_table_get_mode(ti->table), &dev);
2106 if (r) { 2105 if (r) {
2107 ti->error = "Cannot get target device"; 2106 ti->error = "Cannot get target device";
2108 return r; 2107 return r;
2109 } 2108 }
2110 2109
2111 ti->private = dev; 2110 ti->private = dev;
2112 ti->num_flush_requests = 1; 2111 ti->num_flush_requests = 1;
2113 2112
2114 return 0; 2113 return 0;
2115 } 2114 }
2116 2115
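As with the snapshot targets, a hypothetical table line for this constructor (path and length illustrative):

/*
 *   0 2097152 snapshot-origin /dev/vg0/base
 */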
2117 static void origin_dtr(struct dm_target *ti) 2116 static void origin_dtr(struct dm_target *ti)
2118 { 2117 {
2119 struct dm_dev *dev = ti->private; 2118 struct dm_dev *dev = ti->private;
2120 dm_put_device(ti, dev); 2119 dm_put_device(ti, dev);
2121 } 2120 }
2122 2121
2123 static int origin_map(struct dm_target *ti, struct bio *bio, 2122 static int origin_map(struct dm_target *ti, struct bio *bio,
2124 union map_info *map_context) 2123 union map_info *map_context)
2125 { 2124 {
2126 struct dm_dev *dev = ti->private; 2125 struct dm_dev *dev = ti->private;
2127 bio->bi_bdev = dev->bdev; 2126 bio->bi_bdev = dev->bdev;
2128 2127
2129 if (unlikely(bio_empty_barrier(bio))) 2128 if (unlikely(bio_empty_barrier(bio)))
2130 return DM_MAPIO_REMAPPED; 2129 return DM_MAPIO_REMAPPED;
2131 2130
2132 /* Only tell snapshots if this is a write */ 2131 /* Only tell snapshots if this is a write */
2133 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; 2132 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
2134 } 2133 }
2135 2134
2136 /* 2135 /*
2137 * Set the target "split_io" field to the minimum of all the snapshots' 2136 * Set the target "split_io" field to the minimum of all the snapshots'
2138 * chunk sizes. 2137 * chunk sizes.
2139 */ 2138 */
2140 static void origin_resume(struct dm_target *ti) 2139 static void origin_resume(struct dm_target *ti)
2141 { 2140 {
2142 struct dm_dev *dev = ti->private; 2141 struct dm_dev *dev = ti->private;
2143 2142
2144 ti->split_io = get_origin_minimum_chunksize(dev->bdev); 2143 ti->split_io = get_origin_minimum_chunksize(dev->bdev);
2145 } 2144 }
2146 2145
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.postsuspend = snapshot_postsuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.postsuspend = snapshot_postsuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

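The snapshot and snapshot-merge targets share a constructor and take the same
table arguments: <origin_dev> <cow_dev> <P|N> <chunk_size_in_sectors>. A
hedged userspace sketch, reusing the libdevmapper calls from the earlier
example (the device paths, the persistent 'P' store and the 16-sector chunk
size are illustrative):

	/* Sketch: instantiate either snapshot target over hypothetical
	 * devices. ttype is "snapshot" or, to merge an existing snapshot
	 * back into its origin, "snapshot-merge". */
	#include <stdint.h>
	#include <libdevmapper.h>

	static int create_snapshot(const char *name, const char *ttype,
				   uint64_t sectors)
	{
		struct dm_task *dmt = dm_task_create(DM_DEVICE_CREATE);

		if (!dmt)
			return -1;

		/* <origin_dev> <cow_dev> <persistent?> <chunk_sectors> */
		if (!dm_task_set_name(dmt, name) ||
		    !dm_task_add_target(dmt, 0, sectors, ttype,
					"/dev/sdb1 /dev/sdb2 P 16") ||
		    !dm_task_run(dmt)) {
			dm_task_destroy(dmt);
			return -1;
		}

		dm_task_destroy(dmt);
		return 0;
	}

Loading the same arguments with ttype "snapshot-merge" is what routes I/O
through snapshot_merge_map() above.
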
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad_tracked_chunk_cache;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

	/* Error paths unwind in the reverse order of initialisation. */
bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad_tracked_chunk_cache:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");