Commit 50f3c3efdd5773d90396be07a7ecaa58227ff906

Authored by Joe Thornber
Committed by Mike Snitzer
1 parent 6afbc01d75

dm thin: switch to an atomic_t for tracking pending new block preparations

Previously we used separate boolean values to track quiescing and
copying actions.  By switching to an atomic_t we can support blocks that
need a partial copy and partial zero.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
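
For orientation, here is a minimal, self-contained sketch (not taken from dm-thin.c) of the counting pattern this commit adopts: the counter is armed with the number of outstanding preparation actions, each completion path decrements it, and the mapping is queued for insertion only when the last action finishes. The names demo_mapping and demo_complete_action are illustrative only; the real code uses dm_thin_new_mapping, __complete_mapping_preparation() and pool->prepared_mappings.

/*
 * Sketch of the "count pending preparation actions" pattern using C11
 * atomics in userspace; the identifiers here do not exist in the kernel.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_mapping {
	atomic_int prepare_actions;	/* quiesce, copy and/or zero still pending */
	bool inserted;			/* stands in for "added to prepared_mappings" */
};

/* Called once per finished action; "inserts" the block when the last one completes. */
static void demo_complete_action(struct demo_mapping *m)
{
	/* atomic_fetch_sub() returns the old value, so 1 means we were the last action. */
	if (atomic_fetch_sub(&m->prepare_actions, 1) == 1)
		m->inserted = true;
}

int main(void)
{
	struct demo_mapping m = { .inserted = false };

	/* Two actions outstanding, e.g. quiesce + copy. */
	atomic_init(&m.prepare_actions, 2);

	demo_complete_action(&m);	/* quiesce done: not yet inserted */
	printf("after quiesce: inserted=%d\n", m.inserted);

	demo_complete_action(&m);	/* copy done: counter hits zero, insert */
	printf("after copy:    inserted=%d\n", m.inserted);
	return 0;
}

In the kernel itself the same check is done with atomic_dec_and_test(), which returns true only when the decrement takes the counter to zero.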

Showing 1 changed file with 16 additions and 13 deletions

drivers/md/dm-thin.c
@@ -554,11 +554,16 @@
 struct dm_thin_new_mapping {
 	struct list_head list;
 
-	bool quiesced:1;
-	bool prepared:1;
 	bool pass_discard:1;
 	bool definitely_not_shared:1;
 
+	/*
+	 * Track quiescing, copying and zeroing preparation actions. When this
+	 * counter hits zero the block is prepared and can be inserted into the
+	 * btree.
+	 */
+	atomic_t prepare_actions;
+
 	int err;
 	struct thin_c *tc;
 	dm_block_t virt_block;
@@ -575,11 +580,11 @@
 	bio_end_io_t *saved_bi_end_io;
 };
 
-static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
+static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
 {
 	struct pool *pool = m->tc->pool;
 
-	if (m->quiesced && m->prepared) {
+	if (atomic_dec_and_test(&m->prepare_actions)) {
 		list_add_tail(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
@@ -594,8 +599,7 @@
 	m->err = read_err || write_err ? -EIO : 0;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = true;
-	__maybe_add_mapping(m);
+	__complete_mapping_preparation(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
@@ -609,8 +613,7 @@
 	m->err = err;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	m->prepared = true;
-	__maybe_add_mapping(m);
+	__complete_mapping_preparation(m);
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
@@ -836,7 +839,9 @@
 	m->cell = cell;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
-		m->quiesced = true;
+		atomic_set(&m->prepare_actions, 1); /* copy only */
+	else
+		atomic_set(&m->prepare_actions, 2); /* quiesce + copy */
 
 	/*
 	 * IO to pool_dev remaps to the pool target's data_dev.
@@ -896,8 +901,7 @@
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	m->quiesced = true;
-	m->prepared = false;
+	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
@@ -3361,8 +3365,7 @@
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list) {
 			list_del(&m->list);
-			m->quiesced = true;
-			__maybe_add_mapping(m);
+			__complete_mapping_preparation(m);
 		}
 		spin_unlock_irqrestore(&pool->lock, flags);
 	}