Commit 4184153f9e483f9bb63339ed316e059962fe9794
Committed by Alasdair G Kergon
1 parent f1e5398746
Exists in master and in 7 other branches
dm raid1: support flush
Flush support for dm-raid1.

When it receives an empty barrier, submit it to all the devices via dm-io.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
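Background for reading the hunks below: in this era of the block layer an "empty barrier" is a write bio that carries the barrier flag but no data (bio->bi_size == 0); it asks the device to flush its volatile write cache. The helper below is only a paraphrase of the bio_empty_barrier() test the patch relies on (the real macro lives in include/linux/bio.h and also excludes discards), given a hypothetical name and included so the new checks are easier to read at a glance. Setting ti->num_flush_requests = 1 in the mirror constructor is what advertises to the device-mapper core that the target can now accept such bios.

/*
 * Illustrative paraphrase only -- not the exact upstream definition.
 * An empty barrier is a barrier request carrying no payload.
 */
static inline int is_empty_barrier(struct bio *bio)
{
        return (bio->bi_rw & (1 << BIO_RW_BARRIER)) && !bio->bi_size;
}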
Showing 2 changed files with 34 additions and 4 deletions
drivers/md/dm-raid1.c
@@ -396,6 +396,8 @@
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
+        if (unlikely(!bio->bi_size))
+                return 0;
         return m->offset + (bio->bi_sector - m->ms->ti->begin);
 }
 
@@ -562,7 +564,7 @@
         struct dm_io_region io[ms->nr_mirrors], *dest = io;
         struct mirror *m;
         struct dm_io_request io_req = {
-                .bi_rw = WRITE,
+                .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
                 .mem.type = DM_IO_BVEC,
                 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
                 .notify.fn = write_callback,
@@ -603,6 +605,11 @@
         bio_list_init(&requeue);
 
         while ((bio = bio_list_pop(writes))) {
+                if (unlikely(bio_empty_barrier(bio))) {
+                        bio_list_add(&sync, bio);
+                        continue;
+                }
+
                 region = dm_rh_bio_to_region(ms->rh, bio);
 
                 if (log->type->is_remote_recovering &&
@@ -995,6 +1002,7 @@
 
         ti->private = ms;
         ti->split_io = dm_rh_get_region_size(ms->rh);
+        ti->num_flush_requests = 1;
 
         ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
         if (!ms->kmirrord_wq) {
@@ -1122,7 +1130,8 @@
          * We need to dec pending if this was a write.
          */
         if (rw == WRITE) {
-                dm_rh_dec(ms->rh, map_context->ll);
+                if (likely(!bio_empty_barrier(bio)))
+                        dm_rh_dec(ms->rh, map_context->ll);
                 return error;
         }
 
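Putting the dm-raid1.c hunks together: do_writes() now routes an empty barrier straight onto the sync list, bypassing the region-hash bookkeeping, and do_write() forwards the barrier flag to dm-io while still building one dm_io_region per mirror, so a single dm_io() call submits the flush to every leg. The condensed sketch below is reconstructed from the context lines above plus my reading of the surrounding code of that kernel version; the initializers not shown in the hunk (.notify.context, .client) and the map_region() helper are assumptions about that surrounding code, not part of this patch.

/*
 * Condensed sketch of do_write() after this patch (not the full body).
 * The barrier bit travels in .bi_rw, and the io[] array addresses every
 * mirror, so dm_io() submits the request to all devices at once.
 */
static void do_write(struct mirror_set *ms, struct bio *bio)
{
        unsigned int i;
        struct dm_io_region io[ms->nr_mirrors], *dest = io;
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
                .mem.type = DM_IO_BVEC,
                .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
                .notify.fn = write_callback,
                .notify.context = bio,          /* assumed from surrounding code */
                .client = ms->io_client,        /* assumed from surrounding code */
        };

        /*
         * One destination region per mirror leg; with the map_sector()
         * change above, an empty barrier maps to sector 0 of each leg.
         */
        for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
                map_region(dest++, m, bio);

        /* ... per-bio bookkeeping omitted ... */
        BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}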
drivers/md/dm-region-hash.c
@@ -79,6 +79,11 @@
         struct list_head recovered_regions;
         struct list_head failed_recovered_regions;
 
+        /*
+         * If there was a barrier failure no regions can be marked clean.
+         */
+        int barrier_failure;
+
         void *context;
         sector_t target_begin;
 
@@ -211,6 +216,7 @@
         INIT_LIST_HEAD(&rh->quiesced_regions);
         INIT_LIST_HEAD(&rh->recovered_regions);
         INIT_LIST_HEAD(&rh->failed_recovered_regions);
+        rh->barrier_failure = 0;
 
         rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
                                                       sizeof(struct dm_region));
@@ -395,6 +401,11 @@
         region_t region = dm_rh_bio_to_region(rh, bio);
         int recovering = 0;
 
+        if (bio_empty_barrier(bio)) {
+                rh->barrier_failure = 1;
+                return;
+        }
+
         /* We must inform the log that the sync count has changed. */
         log->type->set_region_sync(log, region, 0);
 
@@ -515,8 +526,11 @@
 {
         struct bio *bio;
 
-        for (bio = bios->head; bio; bio = bio->bi_next)
+        for (bio = bios->head; bio; bio = bio->bi_next) {
+                if (bio_empty_barrier(bio))
+                        continue;
                 rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+        }
 }
 EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
 
@@ -544,7 +558,14 @@
                  */
 
                 /* do nothing for DM_RH_NOSYNC */
-                if (reg->state == DM_RH_RECOVERING) {
+                if (unlikely(rh->barrier_failure)) {
+                        /*
+                         * If a write barrier failed some time ago, we
+                         * don't know whether or not this write made it
+                         * to the disk, so we must resync the device.
+                         */
+                        reg->state = DM_RH_NOSYNC;
+                } else if (reg->state == DM_RH_RECOVERING) {
                         list_add_tail(&reg->list, &rh->quiesced_regions);
                 } else if (reg->state == DM_RH_DIRTY) {
                         reg->state = DM_RH_CLEAN;
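Two properties of the region-hash side are worth spelling out. First, empty barriers are excluded from pending-region accounting at both ends, so the counts stay balanced: dm_rh_inc_pending() skips them on submission, and the mirror_end_io() hunk in dm-raid1.c above skips the matching dm_rh_dec() on completion. Second, barrier_failure is sticky: once a failed empty barrier reaches the mark-nosync path, dm_rh_dec() marks regions DM_RH_NOSYNC rather than DM_RH_CLEAN as their pending writes drain, because without a successful flush there is no proof those writes reached stable storage, and the affected regions are later resynced. The annotated pairing below merely restates the two hunks behind the first point; it is not new code.

/* Submission side -- the dm_rh_inc_pending() loop after this patch: */
for (bio = bios->head; bio; bio = bio->bi_next) {
        if (bio_empty_barrier(bio))
                continue;                               /* never counted ...        */
        rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}

/* Completion side -- the mirror_end_io() write path after this patch: */
if (rw == WRITE) {
        if (likely(!bio_empty_barrier(bio)))
                dm_rh_dec(ms->rh, map_context->ll);     /* ... so never decremented */
        return error;
}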