Commit 2c140a246dc0bc085b98eddde978060fcec1080c

Authored by Mikulas Patocka
Committed by Mike Snitzer
1 parent 7833b08e18

dm: allow remove to be deferred

This patch allows the removal of an open device to be deferred until
it is closed.  (Previously such a removal attempt would fail.)

The deferred remove functionality is enabled by setting the flag
DM_DEFERRED_REMOVE in the ioctl structure on DM_DEV_REMOVE or
DM_REMOVE_ALL ioctl.

On return from DM_DEV_REMOVE, the flag DM_DEFERRED_REMOVE indicates if
the device was removed immediately or flagged to be removed on close -
if the flag is clear, the device was removed.

On return from DM_DEV_STATUS and other ioctls, the flag
DM_DEFERRED_REMOVE is set if the device is scheduled to be removed on
closure.

A device that is scheduled to be deleted can be revived using the
message "@cancel_deferred_remove". This message clears the
DMF_DEFERRED_REMOVE flag so that the device won't be deleted on close.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Showing 4 changed files with 99 additions and 12 deletions Side-by-side Diff

drivers/md/dm-ioctl.c
... ... @@ -57,7 +57,7 @@
57 57 static struct list_head _name_buckets[NUM_BUCKETS];
58 58 static struct list_head _uuid_buckets[NUM_BUCKETS];
59 59  
60   -static void dm_hash_remove_all(int keep_open_devices);
  60 +static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
61 61  
62 62 /*
63 63 * Guards access to both hash tables.
... ... @@ -86,7 +86,7 @@
86 86  
87 87 static void dm_hash_exit(void)
88 88 {
89   - dm_hash_remove_all(0);
  89 + dm_hash_remove_all(false, false, false);
90 90 }
91 91  
92 92 /*-----------------------------------------------------------------
... ... @@ -276,7 +276,7 @@
276 276 return table;
277 277 }
278 278  
279   -static void dm_hash_remove_all(int keep_open_devices)
  279 +static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
280 280 {
281 281 int i, dev_skipped;
282 282 struct hash_cell *hc;
... ... @@ -293,7 +293,8 @@
293 293 md = hc->md;
294 294 dm_get(md);
295 295  
296   - if (keep_open_devices && dm_lock_for_deletion(md)) {
  296 + if (keep_open_devices &&
  297 + dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
297 298 dm_put(md);
298 299 dev_skipped++;
299 300 continue;
... ... @@ -450,6 +451,11 @@
450 451 return md;
451 452 }
452 453  
  454 +void dm_deferred_remove(void)
  455 +{
  456 + dm_hash_remove_all(true, false, true);
  457 +}
  458 +
453 459 /*-----------------------------------------------------------------
454 460 * Implementation of the ioctl commands
455 461 *---------------------------------------------------------------*/
... ... @@ -461,7 +467,7 @@
461 467  
462 468 static int remove_all(struct dm_ioctl *param, size_t param_size)
463 469 {
464   - dm_hash_remove_all(1);
  470 + dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
465 471 param->data_size = 0;
466 472 return 0;
467 473 }
... ... @@ -683,6 +689,9 @@
683 689 if (dm_suspended_md(md))
684 690 param->flags |= DM_SUSPEND_FLAG;
685 691  
  692 + if (dm_test_deferred_remove_flag(md))
  693 + param->flags |= DM_DEFERRED_REMOVE;
  694 +
686 695 param->dev = huge_encode_dev(disk_devt(disk));
687 696  
688 697 /*
689 698  
... ... @@ -832,8 +841,13 @@
832 841 /*
833 842 * Ensure the device is not open and nothing further can open it.
834 843 */
835   - r = dm_lock_for_deletion(md);
  844 + r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
836 845 if (r) {
  846 + if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
  847 + up_write(&_hash_lock);
  848 + dm_put(md);
  849 + return 0;
  850 + }
837 851 DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
838 852 up_write(&_hash_lock);
839 853 dm_put(md);
... ... @@ -848,6 +862,8 @@
848 862 dm_table_destroy(t);
849 863 }
850 864  
  865 + param->flags &= ~DM_DEFERRED_REMOVE;
  866 +
851 867 if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
852 868 param->flags |= DM_UEVENT_GENERATED_FLAG;
853 869  
... ... @@ -1468,6 +1484,14 @@
1468 1484  
1469 1485 if (**argv != '@')
1470 1486 return 2; /* no '@' prefix, deliver to target */
  1487 +
  1488 + if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
  1489 + if (argc != 1) {
  1490 + DMERR("Invalid arguments for @cancel_deferred_remove");
  1491 + return -EINVAL;
  1492 + }
  1493 + return dm_cancel_deferred_remove(md);
  1494 + }
1471 1495  
1472 1496 r = dm_stats_message(md, argc, argv, result, maxlen);
1473 1497 if (r < 2)
drivers/md/dm.c
... ... @@ -49,6 +49,11 @@
49 49 static DEFINE_IDR(_minor_idr);
50 50  
51 51 static DEFINE_SPINLOCK(_minor_lock);
  52 +
  53 +static void do_deferred_remove(struct work_struct *w);
  54 +
  55 +static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
  56 +
52 57 /*
53 58 * For bio-based dm.
54 59 * One of these is allocated per bio.
... ... @@ -116,6 +121,7 @@
116 121 #define DMF_DELETING 4
117 122 #define DMF_NOFLUSH_SUSPENDING 5
118 123 #define DMF_MERGE_IS_OPTIONAL 6
  124 +#define DMF_DEFERRED_REMOVE 7
119 125  
120 126 /*
121 127 * A dummy definition to make RCU happy.
... ... @@ -299,6 +305,8 @@
299 305  
300 306 static void local_exit(void)
301 307 {
  308 + flush_scheduled_work();
  309 +
302 310 kmem_cache_destroy(_rq_tio_cache);
303 311 kmem_cache_destroy(_io_cache);
304 312 unregister_blkdev(_major, _name);
... ... @@ -404,7 +412,10 @@
404 412  
405 413 spin_lock(&_minor_lock);
406 414  
407   - atomic_dec(&md->open_count);
  415 + if (atomic_dec_and_test(&md->open_count) &&
  416 + (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
  417 + schedule_work(&deferred_remove_work);
  418 +
408 419 dm_put(md);
409 420  
410 421 spin_unlock(&_minor_lock);
411 422  
412 423  
... ... @@ -418,14 +429,18 @@
418 429 /*
419 430 * Guarantees nothing is using the device before it's deleted.
420 431 */
421   -int dm_lock_for_deletion(struct mapped_device *md)
  432 +int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
422 433 {
423 434 int r = 0;
424 435  
425 436 spin_lock(&_minor_lock);
426 437  
427   - if (dm_open_count(md))
  438 + if (dm_open_count(md)) {
428 439 r = -EBUSY;
  440 + if (mark_deferred)
  441 + set_bit(DMF_DEFERRED_REMOVE, &md->flags);
  442 + } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
  443 + r = -EEXIST;
429 444 else
430 445 set_bit(DMF_DELETING, &md->flags);
431 446  
... ... @@ -434,6 +449,27 @@
434 449 return r;
435 450 }
436 451  
  452 +int dm_cancel_deferred_remove(struct mapped_device *md)
  453 +{
  454 + int r = 0;
  455 +
  456 + spin_lock(&_minor_lock);
  457 +
  458 + if (test_bit(DMF_DELETING, &md->flags))
  459 + r = -EBUSY;
  460 + else
  461 + clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
  462 +
  463 + spin_unlock(&_minor_lock);
  464 +
  465 + return r;
  466 +}
  467 +
  468 +static void do_deferred_remove(struct work_struct *w)
  469 +{
  470 + dm_deferred_remove();
  471 +}
  472 +
437 473 sector_t dm_get_size(struct mapped_device *md)
438 474 {
439 475 return get_capacity(md->disk);
... ... @@ -2892,6 +2928,11 @@
2892 2928 int dm_suspended_md(struct mapped_device *md)
2893 2929 {
2894 2930 return test_bit(DMF_SUSPENDED, &md->flags);
  2931 +}
  2932 +
  2933 +int dm_test_deferred_remove_flag(struct mapped_device *md)
  2934 +{
  2935 + return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2895 2936 }
2896 2937  
2897 2938 int dm_suspended(struct dm_target *ti)
drivers/md/dm.h
... ... @@ -129,6 +129,16 @@
129 129 int dm_suspended_md(struct mapped_device *md);
130 130  
131 131 /*
  132 + * Test if the device is scheduled for deferred remove.
  133 + */
  134 +int dm_test_deferred_remove_flag(struct mapped_device *md);
  135 +
  136 +/*
  137 + * Try to remove devices marked for deferred removal.
  138 + */
  139 +void dm_deferred_remove(void);
  140 +
  141 +/*
132 142 * The device-mapper can be driven through one of two interfaces;
133 143 * ioctl or filesystem, depending which patch you have applied.
134 144 */
... ... @@ -158,7 +168,8 @@
158 168 void dm_destroy(struct mapped_device *md);
159 169 void dm_destroy_immediate(struct mapped_device *md);
160 170 int dm_open_count(struct mapped_device *md);
161   -int dm_lock_for_deletion(struct mapped_device *md);
  171 +int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
  172 +int dm_cancel_deferred_remove(struct mapped_device *md);
162 173 int dm_request_based(struct mapped_device *md);
163 174 sector_t dm_get_size(struct mapped_device *md);
164 175 struct dm_stats *dm_get_stats(struct mapped_device *md);
include/uapi/linux/dm-ioctl.h
... ... @@ -267,9 +267,9 @@
267 267 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
268 268  
269 269 #define DM_VERSION_MAJOR 4
270   -#define DM_VERSION_MINOR 26
  270 +#define DM_VERSION_MINOR 27
271 271 #define DM_VERSION_PATCHLEVEL 0
272   -#define DM_VERSION_EXTRA "-ioctl (2013-08-15)"
  272 +#define DM_VERSION_EXTRA "-ioctl (2013-10-30)"
273 273  
274 274 /* Status bits */
275 275 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
... ... @@ -340,6 +340,17 @@
340 340 * If set, a message generated output data.
341 341 */
342 342 #define DM_DATA_OUT_FLAG (1 << 16) /* Out */
  343 +
  344 +/*
  345 + * If set with DM_DEV_REMOVE or DM_REMOVE_ALL this indicates that if
  346 + * the device cannot be removed immediately because it is still in use
  347 + * it should instead be scheduled for removal when it gets closed.
  348 + *
  349 + * On return from DM_DEV_REMOVE, DM_DEV_STATUS or other ioctls, this
  350 + * flag indicates that the device is scheduled to be removed when it
  351 + * gets closed.
  352 + */
  353 +#define DM_DEFERRED_REMOVE (1 << 17) /* In/Out */
343 354  
344 355 #endif /* _LINUX_DM_IOCTL_H */