Commit 492dfb489658dfe4a755fa29dd0e34e9c8bd8fb8
Committed by James Bottomley
1 parent f19eaa7f53
Exists in master and in 4 other branches
[SCSI] block: add support for shared tag maps
The current block queue implementation already contains most of the
machinery for shared tag maps.  The only remaining pieces are a way to
allocate and destroy a tag map independently of the queues (so that
the maps can be managed on the life cycle of the overseeing entity).

Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
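Taken together with the existing blk_queue_init_tags() (whose third argument accepts an externally allocated map), the new exports let the overseeing entity own the map. A minimal usage sketch, not part of the commit: my_host, my_dev, shared_tags and MY_QUEUE_DEPTH are hypothetical driver names; only blk_init_tags(), blk_free_tags() and the tags argument come from this patch.

/*
 * Illustrative sketch: one tag map shared by every queue behind a
 * single controller.  Hypothetical names throughout.
 */
struct blk_queue_tag *tags;

/* allocate the map on the controller's life cycle, not a queue's */
tags = blk_init_tags(MY_QUEUE_DEPTH);
if (!tags)
	return -ENOMEM;
my_host->shared_tags = tags;

/* attach each device queue to the same map */
if (blk_queue_init_tags(my_dev->queue, MY_QUEUE_DEPTH,
			my_host->shared_tags))
	goto fail;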
Showing 2 changed files with 90 additions and 21 deletions
block/ll_rw_blk.c
@@ -848,21 +848,18 @@
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_queue_free_tags - release tag maintenance info
- * @q:  the request queue for the device
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt:	the tag map to free
  *
- *  Notes:
- *    blk_cleanup_queue() will take care of calling this function, if tagging
- *    has been used. So there's no need to call this directly.
- **/
-static void __blk_queue_free_tags(request_queue_t *q)
+ * Tries to free the specified @bqt@.  Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
 {
-	struct blk_queue_tag *bqt = q->queue_tags;
+	int retval;
 
-	if (!bqt)
-		return;
-
-	if (atomic_dec_and_test(&bqt->refcnt)) {
+	retval = atomic_dec_and_test(&bqt->refcnt);
+	if (retval) {
 		BUG_ON(bqt->busy);
 		BUG_ON(!list_empty(&bqt->busy_list));
 
@@ -873,13 +870,50 @@
 		bqt->tag_map = NULL;
 
 		kfree(bqt);
+
 	}
 
+	return retval;
+}
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *    blk_cleanup_queue() will take care of calling this function, if tagging
+ *    has been used. So there's no need to call this directly.
+ **/
+static void __blk_queue_free_tags(request_queue_t *q)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+
+	if (!bqt)
+		return;
+
+	__blk_free_tags(bqt);
+
 	q->queue_tags = NULL;
 	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }
 
+
 /**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt:	the tag map to free
+ *
+ * For externally managed @bqt@ frees the map.  Callers of this
+ * function must guarantee to have released all the queues that
+ * might have been using this tag map.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+	if (unlikely(!__blk_free_tags(bqt)))
+		BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
+/**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
  *
@@ -901,7 +935,7 @@
 	unsigned long *tag_map;
 	int nr_ulongs;
 
-	if (depth > q->nr_requests * 2) {
+	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
 		       __FUNCTION__, depth);
@@ -927,7 +961,39 @@
 	return -ENOMEM;
 }
 
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+						   int depth)
+{
+	struct blk_queue_tag *tags;
+
+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+	if (!tags)
+		goto fail;
+
+	if (init_tag_map(q, tags, depth))
+		goto fail;
+
+	INIT_LIST_HEAD(&tags->busy_list);
+	tags->busy = 0;
+	atomic_set(&tags->refcnt, 1);
+	return tags;
+fail:
+	kfree(tags);
+	return NULL;
+}
+
 /**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth:	the maximum queue depth supported
+ * @tags: the tag to use
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+	return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
+/**
  * blk_queue_init_tags - initialize the queue tag info
  * @q:  the request queue for the device
  * @depth:  the maximum queue depth supported
@@ -941,16 +1007,10 @@
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+		tags = __blk_queue_init_tags(q, depth);
+
 		if (!tags)
 			goto fail;
-
-		if (init_tag_map(q, tags, depth))
-			goto fail;
-
-		INIT_LIST_HEAD(&tags->busy_list);
-		tags->busy = 0;
-		atomic_set(&tags->refcnt, 1);
 	} else if (q->queue_tags) {
 		if ((rc = blk_queue_resize_tags(q, depth)))
 			return rc;
@@ -1000,6 +1060,13 @@
 		bqt->max_depth = new_depth;
 		return 0;
 	}
+
+	/*
+	 * Currently cannot replace a shared tag map with a new
+	 * one, so error out if this is the case
+	 */
+	if (atomic_read(&bqt->refcnt) != 1)
+		return -EBUSY;
 
 	/*
 	 * save the old state info, so we can copy it back
include/linux/blkdev.h
@@ -746,6 +746,8 @@
 extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
 extern long blk_congestion_wait(int rw, long timeout);
+extern struct blk_queue_tag *blk_init_tags(int);
+extern void blk_free_tags(struct blk_queue_tag *);
 
 extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
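The split between the two free paths encodes the intended life cycle: __blk_queue_free_tags() merely drops one queue's reference, so a shared map outlives any single queue, while blk_free_tags() must be the final put. A teardown sketch under the same hypothetical names as the usage sketch above; the struct layout and my_host_remove() are illustrative only.

/*
 * Teardown sketch (illustrative): queues go first, the map last.
 */
static void my_host_remove(struct my_host *host)
{
	struct my_dev *dev;

	/* blk_cleanup_queue() ends up in __blk_queue_free_tags(),
	 * which only drops that queue's reference on the shared map */
	list_for_each_entry(dev, &host->devices, node)
		blk_cleanup_queue(dev->queue);

	/* only the creator's reference from blk_init_tags() remains,
	 * so this really frees the map; blk_free_tags() BUG()s if a
	 * queue still holds a reference */
	blk_free_tags(host->shared_tags);
}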