Commit a91d5ac04841ca1be340e8610e6d899fc8b419b5
Committed by NeilBrown
1 parent b6d428c669
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
MD: Export 'md_reap_sync_thread' function
Make 'md_reap_sync_thread' available to other files, specifically dm-raid.c.

- rename reap_sync_thread to md_reap_sync_thread
- move the fn after md_check_recovery to match md.h declaration placement
- export md_reap_sync_thread

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
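For context, a minimal sketch of how a caller outside md.c (for example dm-raid.c) could use the newly exported helper, mirroring the existing call pattern in md.c's action_store() shown in the diff below. The wrapper name example_stop_sync() is hypothetical, used only for illustration; it is not code from this commit:

    #include "md.h"   /* declares md_reap_sync_thread() after this commit */

    /* Hypothetical helper: interrupt an in-progress resync and collect the
     * result, following the pattern used by action_store() in md.c.
     */
    static void example_stop_sync(struct mddev *mddev)
    {
            if (mddev->sync_thread) {
                    /* ask the sync thread to abort ... */
                    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                    /* ... then reap it: unregister the thread, activate any
                     * spares, clear the MD_RECOVERY_* bits and write out the
                     * superblock, as md_reap_sync_thread() does below */
                    md_reap_sync_thread(mddev);
            }
    }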
Showing 2 changed files with 50 additions and 50 deletions
drivers/md/md.c
... | ... | @@ -4231,8 +4231,6 @@ |
4231 | 4231 | return sprintf(page, "%s\n", type); |
4232 | 4232 | } |
4233 | 4233 | |
4234 | -static void reap_sync_thread(struct mddev *mddev); | |
4235 | - | |
4236 | 4234 | static ssize_t |
4237 | 4235 | action_store(struct mddev *mddev, const char *page, size_t len) |
4238 | 4236 | { |
... | ... | @@ -4247,7 +4245,7 @@ |
4247 | 4245 | if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { |
4248 | 4246 | if (mddev->sync_thread) { |
4249 | 4247 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
4250 | - reap_sync_thread(mddev); | |
4248 | + md_reap_sync_thread(mddev); | |
4251 | 4249 | } |
4252 | 4250 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || |
4253 | 4251 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) |
... | ... | @@ -5285,7 +5283,7 @@ |
5285 | 5283 | if (mddev->sync_thread) { |
5286 | 5284 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
5287 | 5285 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
5288 | - reap_sync_thread(mddev); | |
5286 | + md_reap_sync_thread(mddev); | |
5289 | 5287 | } |
5290 | 5288 | |
5291 | 5289 | del_timer_sync(&mddev->safemode_timer); |
... | ... | @@ -7742,51 +7740,6 @@ |
7742 | 7740 | return spares; |
7743 | 7741 | } |
7744 | 7742 | |
7745 | -static void reap_sync_thread(struct mddev *mddev) | |
7746 | -{ | |
7747 | - struct md_rdev *rdev; | |
7748 | - | |
7749 | - /* resync has finished, collect result */ | |
7750 | - md_unregister_thread(&mddev->sync_thread); | |
7751 | - if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | |
7752 | - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | |
7753 | - /* success...*/ | |
7754 | - /* activate any spares */ | |
7755 | - if (mddev->pers->spare_active(mddev)) { | |
7756 | - sysfs_notify(&mddev->kobj, NULL, | |
7757 | - "degraded"); | |
7758 | - set_bit(MD_CHANGE_DEVS, &mddev->flags); | |
7759 | - } | |
7760 | - } | |
7761 | - if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | |
7762 | - mddev->pers->finish_reshape) | |
7763 | - mddev->pers->finish_reshape(mddev); | |
7764 | - | |
7765 | - /* If array is no-longer degraded, then any saved_raid_disk | |
7766 | - * information must be scrapped. Also if any device is now | |
7767 | - * In_sync we must scrape the saved_raid_disk for that device | |
7768 | - * do the superblock for an incrementally recovered device | |
7769 | - * written out. | |
7770 | - */ | |
7771 | - rdev_for_each(rdev, mddev) | |
7772 | - if (!mddev->degraded || | |
7773 | - test_bit(In_sync, &rdev->flags)) | |
7774 | - rdev->saved_raid_disk = -1; | |
7775 | - | |
7776 | - md_update_sb(mddev, 1); | |
7777 | - clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | |
7778 | - clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | |
7779 | - clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | |
7780 | - clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | |
7781 | - clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); | |
7782 | - /* flag recovery needed just to double check */ | |
7783 | - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | |
7784 | - sysfs_notify_dirent_safe(mddev->sysfs_action); | |
7785 | - md_new_event(mddev); | |
7786 | - if (mddev->event_work.func) | |
7787 | - queue_work(md_misc_wq, &mddev->event_work); | |
7788 | -} | |
7789 | - | |
7790 | 7743 | /* |
7791 | 7744 | * This routine is regularly called by all per-raid-array threads to |
7792 | 7745 | * deal with generic issues like resync and super-block update. |
... | ... | @@ -7883,7 +7836,7 @@ |
7883 | 7836 | goto unlock; |
7884 | 7837 | } |
7885 | 7838 | if (mddev->sync_thread) { |
7886 | - reap_sync_thread(mddev); | |
7839 | + md_reap_sync_thread(mddev); | |
7887 | 7840 | goto unlock; |
7888 | 7841 | } |
7889 | 7842 | /* Set RUNNING before clearing NEEDED to avoid |
... | ... | @@ -7964,6 +7917,51 @@ |
7964 | 7917 | } |
7965 | 7918 | } |
7966 | 7919 | |
7920 | +void md_reap_sync_thread(struct mddev *mddev) | |
7921 | +{ | |
7922 | + struct md_rdev *rdev; | |
7923 | + | |
7924 | + /* resync has finished, collect result */ | |
7925 | + md_unregister_thread(&mddev->sync_thread); | |
7926 | + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | |
7927 | + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | |
7928 | + /* success...*/ | |
7929 | + /* activate any spares */ | |
7930 | + if (mddev->pers->spare_active(mddev)) { | |
7931 | + sysfs_notify(&mddev->kobj, NULL, | |
7932 | + "degraded"); | |
7933 | + set_bit(MD_CHANGE_DEVS, &mddev->flags); | |
7934 | + } | |
7935 | + } | |
7936 | + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | |
7937 | + mddev->pers->finish_reshape) | |
7938 | + mddev->pers->finish_reshape(mddev); | |
7939 | + | |
7940 | + /* If array is no-longer degraded, then any saved_raid_disk | |
7941 | + * information must be scrapped. Also if any device is now | |
7942 | + * In_sync we must scrape the saved_raid_disk for that device | |
7943 | + * do the superblock for an incrementally recovered device | |
7944 | + * written out. | |
7945 | + */ | |
7946 | + rdev_for_each(rdev, mddev) | |
7947 | + if (!mddev->degraded || | |
7948 | + test_bit(In_sync, &rdev->flags)) | |
7949 | + rdev->saved_raid_disk = -1; | |
7950 | + | |
7951 | + md_update_sb(mddev, 1); | |
7952 | + clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | |
7953 | + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | |
7954 | + clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | |
7955 | + clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | |
7956 | + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); | |
7957 | + /* flag recovery needed just to double check */ | |
7958 | + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | |
7959 | + sysfs_notify_dirent_safe(mddev->sysfs_action); | |
7960 | + md_new_event(mddev); | |
7961 | + if (mddev->event_work.func) | |
7962 | + queue_work(md_misc_wq, &mddev->event_work); | |
7963 | +} | |
7964 | + | |
7967 | 7965 | void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) |
7968 | 7966 | { |
7969 | 7967 | sysfs_notify_dirent_safe(rdev->sysfs_state); |
... | ... | @@ -8689,6 +8687,7 @@ |
8689 | 8687 | EXPORT_SYMBOL(md_unregister_thread); |
8690 | 8688 | EXPORT_SYMBOL(md_wakeup_thread); |
8691 | 8689 | EXPORT_SYMBOL(md_check_recovery); |
8690 | +EXPORT_SYMBOL(md_reap_sync_thread); | |
8692 | 8691 | MODULE_LICENSE("GPL"); |
8693 | 8692 | MODULE_DESCRIPTION("MD RAID framework"); |
8694 | 8693 | MODULE_ALIAS("md"); |
drivers/md/md.h
... | ... | @@ -567,6 +567,7 @@ |
567 | 567 | extern void md_unregister_thread(struct md_thread **threadp); |
568 | 568 | extern void md_wakeup_thread(struct md_thread *thread); |
569 | 569 | extern void md_check_recovery(struct mddev *mddev); |
570 | +extern void md_reap_sync_thread(struct mddev *mddev); | |
570 | 571 | extern void md_write_start(struct mddev *mddev, struct bio *bi); |
571 | 572 | extern void md_write_end(struct mddev *mddev); |
572 | 573 | extern void md_done_sync(struct mddev *mddev, int blocks, int ok); |