Commit 9cbb17508808f8a6bdd83354b61e126ac4fa6fed
Committed by
Jens Axboe
1 parent
0021b7bc04
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
blk: centralize non-request unplug handling.
Both md and umem have similar code for getting notified on a blk_finish_plug event. Centralize this code in block/ and allow each driver to provide its distinctive difference. Signed-off-by: NeilBrown <neilb@suse.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Showing 5 changed files with 49 additions and 83 deletions Side-by-side Diff
block/blk-core.c
... | ... | @@ -2927,6 +2927,31 @@ |
2927 | 2927 | } |
2928 | 2928 | } |
2929 | 2929 | |
2930 | +struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, | |
2931 | + int size) | |
2932 | +{ | |
2933 | + struct blk_plug *plug = current->plug; | |
2934 | + struct blk_plug_cb *cb; | |
2935 | + | |
2936 | + if (!plug) | |
2937 | + return NULL; | |
2938 | + | |
2939 | + list_for_each_entry(cb, &plug->cb_list, list) | |
2940 | + if (cb->callback == unplug && cb->data == data) | |
2941 | + return cb; | |
2942 | + | |
2943 | + /* Not currently on the callback list */ | |
2944 | + BUG_ON(size < sizeof(*cb)); | |
2945 | + cb = kzalloc(size, GFP_ATOMIC); | |
2946 | + if (cb) { | |
2947 | + cb->data = data; | |
2948 | + cb->callback = unplug; | |
2949 | + list_add(&cb->list, &plug->cb_list); | |
2950 | + } | |
2951 | + return cb; | |
2952 | +} | |
2953 | +EXPORT_SYMBOL(blk_check_plugged); | |
2954 | + | |
2930 | 2955 | void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) |
2931 | 2956 | { |
2932 | 2957 | struct request_queue *q; |
drivers/block/umem.c
... | ... | @@ -513,42 +513,19 @@ |
513 | 513 | } |
514 | 514 | } |
515 | 515 | |
516 | -struct mm_plug_cb { | |
517 | - struct blk_plug_cb cb; | |
518 | - struct cardinfo *card; | |
519 | -}; | |
520 | - | |
521 | 516 | static void mm_unplug(struct blk_plug_cb *cb) |
522 | 517 | { |
523 | - struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb); | |
518 | + struct cardinfo *card = cb->data; | |
524 | 519 | |
525 | - spin_lock_irq(&mmcb->card->lock); | |
526 | - activate(mmcb->card); | |
527 | - spin_unlock_irq(&mmcb->card->lock); | |
528 | - kfree(mmcb); | |
520 | + spin_lock_irq(&card->lock); | |
521 | + activate(card); | |
522 | + spin_unlock_irq(&card->lock); | |
523 | + kfree(cb); | |
529 | 524 | } |
530 | 525 | |
531 | 526 | static int mm_check_plugged(struct cardinfo *card) |
532 | 527 | { |
533 | - struct blk_plug *plug = current->plug; | |
534 | - struct mm_plug_cb *mmcb; | |
535 | - | |
536 | - if (!plug) | |
537 | - return 0; | |
538 | - | |
539 | - list_for_each_entry(mmcb, &plug->cb_list, cb.list) { | |
540 | - if (mmcb->cb.callback == mm_unplug && mmcb->card == card) | |
541 | - return 1; | |
542 | - } | |
543 | - /* Not currently on the callback list */ | |
544 | - mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC); | |
545 | - if (!mmcb) | |
546 | - return 0; | |
547 | - | |
548 | - mmcb->card = card; | |
549 | - mmcb->cb.callback = mm_unplug; | |
550 | - list_add(&mmcb->cb.list, &plug->cb_list); | |
551 | - return 1; | |
528 | + return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); | |
552 | 529 | } |
553 | 530 | |
554 | 531 | static void mm_make_request(struct request_queue *q, struct bio *bio) |
drivers/md/md.c
... | ... | @@ -498,59 +498,13 @@ |
498 | 498 | } |
499 | 499 | EXPORT_SYMBOL(md_flush_request); |
500 | 500 | |
501 | -/* Support for plugging. | |
502 | - * This mirrors the plugging support in request_queue, but does not | |
503 | - * require having a whole queue or request structures. | |
504 | - * We allocate an md_plug_cb for each md device and each thread it gets | |
505 | - * plugged on. This links to the private plug_handle structure in the | 
506 | - * personality data where we keep a count of the number of outstanding | |
507 | - * plugs so other code can see if a plug is active. | |
508 | - */ | |
509 | -struct md_plug_cb { | |
510 | - struct blk_plug_cb cb; | |
511 | - struct mddev *mddev; | |
512 | -}; | |
513 | - | |
514 | -static void plugger_unplug(struct blk_plug_cb *cb) | |
501 | +void md_unplug(struct blk_plug_cb *cb) | |
515 | 502 | { |
516 | - struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); | |
517 | - md_wakeup_thread(mdcb->mddev->thread); | |
518 | - kfree(mdcb); | |
503 | + struct mddev *mddev = cb->data; | |
504 | + md_wakeup_thread(mddev->thread); | |
505 | + kfree(cb); | |
519 | 506 | } |
520 | - | |
521 | -/* Check that an unplug wakeup will come shortly. | |
522 | - * If not, wakeup the md thread immediately | |
523 | - */ | |
524 | -int mddev_check_plugged(struct mddev *mddev) | |
525 | -{ | |
526 | - struct blk_plug *plug = current->plug; | |
527 | - struct md_plug_cb *mdcb; | |
528 | - | |
529 | - if (!plug) | |
530 | - return 0; | |
531 | - | |
532 | - list_for_each_entry(mdcb, &plug->cb_list, cb.list) { | |
533 | - if (mdcb->cb.callback == plugger_unplug && | |
534 | - mdcb->mddev == mddev) { | |
535 | - /* Already on the list, move to top */ | |
536 | - if (mdcb != list_first_entry(&plug->cb_list, | |
537 | - struct md_plug_cb, | |
538 | - cb.list)) | |
539 | - list_move(&mdcb->cb.list, &plug->cb_list); | |
540 | - return 1; | |
541 | - } | |
542 | - } | |
543 | - /* Not currently on the callback list */ | |
544 | - mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); | |
545 | - if (!mdcb) | |
546 | - return 0; | |
547 | - | |
548 | - mdcb->mddev = mddev; | |
549 | - mdcb->cb.callback = plugger_unplug; | |
550 | - list_add(&mdcb->cb.list, &plug->cb_list); | |
551 | - return 1; | |
552 | -} | |
553 | -EXPORT_SYMBOL_GPL(mddev_check_plugged); | |
507 | +EXPORT_SYMBOL(md_unplug); | |
554 | 508 | |
555 | 509 | static inline struct mddev *mddev_get(struct mddev *mddev) |
556 | 510 | { |
drivers/md/md.h
... | ... | @@ -627,7 +627,13 @@ |
627 | 627 | struct mddev *mddev); |
628 | 628 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
629 | 629 | struct mddev *mddev); |
630 | -extern int mddev_check_plugged(struct mddev *mddev); | |
631 | 630 | extern void md_trim_bio(struct bio *bio, int offset, int size); |
631 | + | |
632 | +extern void md_unplug(struct blk_plug_cb *cb); | |
633 | +static inline int mddev_check_plugged(struct mddev *mddev) | |
634 | +{ | |
635 | + return !!blk_check_plugged(md_unplug, mddev, | |
636 | + sizeof(struct blk_plug_cb)); | |
637 | +} | |
632 | 638 | #endif /* _MD_MD_H */ |
include/linux/blkdev.h
... | ... | @@ -922,11 +922,15 @@ |
922 | 922 | }; |
923 | 923 | #define BLK_MAX_REQUEST_COUNT 16 |
924 | 924 | |
925 | +struct blk_plug_cb; | |
926 | +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *); | |
925 | 927 | struct blk_plug_cb { |
926 | 928 | struct list_head list; |
927 | - void (*callback)(struct blk_plug_cb *); | |
929 | + blk_plug_cb_fn callback; | |
930 | + void *data; | |
928 | 931 | }; |
929 | - | |
932 | +extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, | |
933 | + void *data, int size); | |
930 | 934 | extern void blk_start_plug(struct blk_plug *); |
931 | 935 | extern void blk_finish_plug(struct blk_plug *); |
932 | 936 | extern void blk_flush_plug_list(struct blk_plug *, bool); |