Commit 5309cb38de65eddd5f7e125da750accf949f29e8

Authored by Jens Axboe
1 parent 3862153b67

Add queue resizing support

Just get rid of the preallocated command map and use the slab cache
to get/free commands instead.

Original patch from FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>,
changed by me to not use a mempool.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
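
For background, the slab-cache lifecycle the patch switches to looks roughly
like this (a minimal sketch using the six-argument kmem_cache_create() of this
kernel generation; struct my_obj and my_cachep are made-up names, and error
handling is trimmed):

	#include <linux/slab.h>

	static struct kmem_cache *my_cachep;

	/* once, at init time: one cache per object type */
	my_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj),
					0, 0, NULL, NULL);

	/* per object: allocate and free on demand */
	struct my_obj *obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);
	kmem_cache_free(my_cachep, obj);

	/* once, at exit time */
	kmem_cache_destroy(my_cachep);

Because commands now come from a cache instead of a fixed preallocated map,
a device no longer has a compile-time bound on queue depth, which is what
makes the SG_SET_COMMAND_Q change below possible.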

Showing 1 changed file with 32 additions and 64 deletions

@@ -33,8 +33,6 @@
 
 static char bsg_version[] = "block layer sg (bsg) 0.4";
 
-struct bsg_command;
-
 struct bsg_device {
 	struct gendisk *disk;
 	request_queue_t *queue;
@@ -46,8 +44,6 @@
 	int minor;
 	int queued_cmds;
 	int done_cmds;
-	unsigned long *cmd_bitmap;
-	struct bsg_command *cmd_map;
 	wait_queue_head_t wq_done;
 	wait_queue_head_t wq_free;
 	char name[BDEVNAME_SIZE];
@@ -60,14 +56,7 @@
 	BSG_F_WRITE_PERM = 2,
 };
 
-/*
- * command allocation bitmap defines
- */
-#define BSG_CMDS_PAGE_ORDER	(1)
-#define BSG_CMDS_PER_LONG	(sizeof(unsigned long) * 8)
-#define BSG_CMDS_MASK		(BSG_CMDS_PER_LONG - 1)
-#define BSG_CMDS_BYTES		(PAGE_SIZE * (1 << BSG_CMDS_PAGE_ORDER))
-#define BSG_CMDS		(BSG_CMDS_BYTES / sizeof(struct bsg_command))
+#define BSG_DEFAULT_CMDS	64
 
 #undef BSG_DEBUG
 
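For scale: with the order-1 allocation and 4 KiB pages, the old command map
was BSG_CMDS_BYTES = 4096 * 2 = 8192 bytes, so the hard queue-depth cap was
8192 / sizeof(struct bsg_command) slots. The new code drops the derived cap
entirely and starts every device at a default of 64 commands, adjustable at
runtime via SG_SET_COMMAND_Q (see below).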
@@ -94,6 +83,8 @@
 static struct class *bsg_class;
 static LIST_HEAD(bsg_class_list);
 
+static struct kmem_cache *bsg_cmd_cachep;
+
 /*
  * our internal command type
  */
@@ -111,14 +102,12 @@
 static void bsg_free_command(struct bsg_command *bc)
 {
 	struct bsg_device *bd = bc->bd;
-	unsigned long bitnr = bc - bd->cmd_map;
 	unsigned long flags;
 
-	dprintk("%s: command bit offset %lu\n", bd->name, bitnr);
+	kmem_cache_free(bsg_cmd_cachep, bc);
 
 	spin_lock_irqsave(&bd->lock, flags);
 	bd->queued_cmds--;
-	__clear_bit(bitnr, bd->cmd_bitmap);
 	spin_unlock_irqrestore(&bd->lock, flags);
 
 	wake_up(&bd->wq_free);
@@ -127,32 +116,29 @@
 static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
 {
 	struct bsg_command *bc = NULL;
-	unsigned long *map;
-	int free_nr;
 
 	spin_lock_irq(&bd->lock);
 
 	if (bd->queued_cmds >= bd->max_queue)
 		goto out;
 
-	for (free_nr = 0, map = bd->cmd_bitmap; *map == ~0UL; map++)
-		free_nr += BSG_CMDS_PER_LONG;
-
-	BUG_ON(*map == ~0UL);
-
 	bd->queued_cmds++;
-	free_nr += ffz(*map);
-	__set_bit(free_nr, bd->cmd_bitmap);
 	spin_unlock_irq(&bd->lock);
 
-	bc = bd->cmd_map + free_nr;
+	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+	if (unlikely(!bc)) {
+		spin_lock_irq(&bd->lock);
+		goto alloc_fail;
+	}
+
 	memset(bc, 0, sizeof(*bc));
 	bc->bd = bd;
 	INIT_LIST_HEAD(&bc->list);
-	dprintk("%s: returning free cmd %p (bit %d)\n", bd->name, bc, free_nr);
+	dprintk("%s: returning free cmd %p\n", bd->name, bc);
 	return bc;
+alloc_fail:
+	bd->queued_cmds--;
 out:
-	dprintk("%s: failed (depth %d)\n", bd->name, bd->queued_cmds);
 	spin_unlock_irq(&bd->lock);
 	return bc;
 }
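The allocation path above follows a reserve-then-allocate pattern: the slot is
claimed under the lock, the lock is dropped across the (possibly sleeping)
GFP_USER allocation, and on failure the lock is re-taken to give the slot
back. Distilled to its skeleton (generic names, not this file's code; lock,
queued, max_queue and cachep are assumed globals):

	static struct my_obj *alloc_reserved(void)
	{
		struct my_obj *obj = NULL;

		spin_lock_irq(&lock);
		if (queued >= max_queue)
			goto out;			/* queue full */
		queued++;				/* reserve a slot first */
		spin_unlock_irq(&lock);

		obj = kmem_cache_alloc(cachep, GFP_USER);	/* may sleep */
		if (obj)
			return obj;

		spin_lock_irq(&lock);
		queued--;				/* roll the reservation back */
	out:
		spin_unlock_irq(&lock);
		return obj;
	}

Reserving before allocating keeps the queued_cmds check and increment atomic
without ever holding the spinlock across a sleeping allocation.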
@@ -356,8 +342,8 @@
 	struct bsg_device *bd = bc->bd;
 	unsigned long flags;
 
-	dprintk("%s: finished rq %p bc %p, bio %p offset %Zd stat %d\n",
-		bd->name, rq, bc, bc->bio, bc - bd->cmd_map, uptodate);
+	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
+		bd->name, rq, bc, bc->bio, uptodate);
 
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
@@ -703,21 +689,9 @@
 	return bytes_read;
 }
 
-static void bsg_free_device(struct bsg_device *bd)
-{
-	if (bd->cmd_map)
-		free_pages((unsigned long) bd->cmd_map, BSG_CMDS_PAGE_ORDER);
-
-	kfree(bd->cmd_bitmap);
-	kfree(bd);
-}
-
 static struct bsg_device *bsg_alloc_device(void)
 {
-	struct bsg_command *cmd_map;
-	unsigned long *cmd_bitmap;
 	struct bsg_device *bd;
-	int bits;
 
 	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
 	if (unlikely(!bd))
@@ -725,20 +699,8 @@
 
 	spin_lock_init(&bd->lock);
 
-	bd->max_queue = BSG_CMDS;
+	bd->max_queue = BSG_DEFAULT_CMDS;
 
-	bits = (BSG_CMDS / BSG_CMDS_PER_LONG) + 1;
-	cmd_bitmap = kzalloc(bits * sizeof(unsigned long), GFP_KERNEL);
-	if (!cmd_bitmap)
-		goto out_free_bd;
-	bd->cmd_bitmap = cmd_bitmap;
-
-	cmd_map = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					BSG_CMDS_PAGE_ORDER);
-	if (!cmd_map)
-		goto out_free_bitmap;
-	bd->cmd_map = cmd_map;
-
 	INIT_LIST_HEAD(&bd->busy_list);
 	INIT_LIST_HEAD(&bd->done_list);
 	INIT_HLIST_NODE(&bd->dev_list);
@@ -746,12 +708,6 @@
 	init_waitqueue_head(&bd->wq_free);
 	init_waitqueue_head(&bd->wq_done);
 	return bd;
-
-out_free_bitmap:
-	kfree(cmd_bitmap);
-out_free_bd:
-	kfree(bd);
-	return NULL;
 }
 
 static int bsg_put_device(struct bsg_device *bd)
@@ -779,7 +735,7 @@
 
 	blk_put_queue(bd->queue);
 	hlist_del(&bd->dev_list);
-	bsg_free_device(bd);
+	kfree(bd);
 out:
 	mutex_unlock(&bsg_mutex);
 	return ret;
@@ -918,15 +874,17 @@
 	 */
 	case SG_GET_COMMAND_Q:
 		return put_user(bd->max_queue, uarg);
-	case SG_SET_COMMAND_Q: {
+	case SG_SET_COMMAND_Q: {
 		int queue;
 
 		if (get_user(queue, uarg))
 			return -EFAULT;
-		if (queue > BSG_CMDS || queue < 1)
+		if (queue < 1)
 			return -EINVAL;
 
+		spin_lock_irq(&bd->lock);
 		bd->max_queue = queue;
+		spin_unlock_irq(&bd->lock);
 		return 0;
 	}
 
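From userspace the two ioctls keep their sg-compatible int-by-pointer
interface; only the upper bound changed. A usage sketch (fd is assumed to be
an already-open bsg device node):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	int depth = 128;	/* above the old BSG_CMDS cap; now legal */
	if (ioctl(fd, SG_SET_COMMAND_Q, &depth) < 0)
		perror("SG_SET_COMMAND_Q");
	if (ioctl(fd, SG_GET_COMMAND_Q, &depth) == 0)
		printf("queue depth now %d\n", depth);

Note the behavioral change: a depth such as 128 would previously have been
rejected with -EINVAL whenever it exceeded BSG_CMDS; after this patch any
depth of at least 1 is accepted, and the added spin_lock_irq pairing keeps the
update coherent with other users of max_queue under bd->lock.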
@@ -1035,15 +993,25 @@
 {
 	int ret, i;
 
+	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
+				sizeof(struct bsg_command), 0, 0, NULL, NULL);
+	if (!bsg_cmd_cachep) {
+		printk(KERN_ERR "bsg: failed creating slab cache\n");
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < BSG_LIST_SIZE; i++)
 		INIT_HLIST_HEAD(&bsg_device_list[i]);
 
 	bsg_class = class_create(THIS_MODULE, "bsg");
-	if (IS_ERR(bsg_class))
+	if (IS_ERR(bsg_class)) {
+		kmem_cache_destroy(bsg_cmd_cachep);
 		return PTR_ERR(bsg_class);
+	}
 
 	ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
 	if (ret) {
+		kmem_cache_destroy(bsg_cmd_cachep);
 		class_destroy(bsg_class);
 		return ret;
 	}
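With the cache now created first at init time, every later failure path has to
unwind it, which is why kmem_cache_destroy() appears before both early
returns. With only two failure points the unwind is done inline; the more
common kernel idiom for longer init sequences is goto-based unwind (a generic
sketch, not this file's code):

	ret = do_step_b();
	if (ret)
		goto out_undo_a;
	return 0;
out_undo_a:
	undo_step_a();		/* e.g. the kmem_cache_destroy() above */
	return ret;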