Commit aa387cc895672b00f807ad7c734a2defaf677712
Committed by
Jens Axboe
1 parent
24c3047095
Exists in
master
and in
20 other branches
block: add bsg helper library
This moves the FC classes bsg code to the block layer and makes it a lib so that other classes like iscsi and SAS can use it. It is helpful because working with the request queue, bios, creating scatterlists, etc are a pain that the LLD does not have to worry about with normal IOs and should not have to worry about for bsg requests. Signed-off-by: Mike Christie <michaelc@cs.wisc.edu> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Showing 5 changed files with 385 additions and 0 deletions Side-by-side Diff
block/Kconfig
... | ... | @@ -65,6 +65,16 @@ |
65 | 65 | |
66 | 66 | If unsure, say Y. |
67 | 67 | |
68 | +config BLK_DEV_BSGLIB | |
69 | + bool "Block layer SG support v4 helper lib" | |
70 | + default n | |
71 | + select BLK_DEV_BSG | |
72 | + help | |
73 | + Subsystems will normally enable this if needed. Users will not | |
74 | + normally need to manually enable this. | |
75 | + | |
76 | + If unsure, say N. | |
77 | + | |
68 | 78 | config BLK_DEV_INTEGRITY |
69 | 79 | bool "Block layer data integrity support" |
70 | 80 | ---help--- |
block/Makefile
... | ... | @@ -8,6 +8,7 @@ |
8 | 8 | blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o |
9 | 9 | |
10 | 10 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o |
11 | +obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o | |
11 | 12 | obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o |
12 | 13 | obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o |
13 | 14 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o |
block/bsg-lib.c
1 | +/* | |
2 | + * BSG helper library | |
3 | + * | |
4 | + * Copyright (C) 2008 James Smart, Emulex Corporation | |
5 | + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. | |
6 | + * Copyright (C) 2011 Mike Christie | |
7 | + * | |
8 | + * This program is free software; you can redistribute it and/or modify | |
9 | + * it under the terms of the GNU General Public License as published by | |
10 | + * the Free Software Foundation; either version 2 of the License, or | |
11 | + * (at your option) any later version. | |
12 | + * | |
13 | + * This program is distributed in the hope that it will be useful, | |
14 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | + * GNU General Public License for more details. | |
17 | + * | |
18 | + * You should have received a copy of the GNU General Public License | |
19 | + * along with this program; if not, write to the Free Software | |
20 | + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
21 | + * | |
22 | + */ | |
23 | +#include <linux/slab.h> | |
24 | +#include <linux/blkdev.h> | |
25 | +#include <linux/delay.h> | |
26 | +#include <linux/scatterlist.h> | |
27 | +#include <linux/bsg-lib.h> | |
28 | +#include <scsi/scsi_cmnd.h> | |
29 | + | |
30 | +/** | |
31 | + * bsg_destroy_job - routine to teardown/delete a bsg job | |
32 | + * @job: bsg_job that is to be torn down | |
33 | + */ | |
34 | +static void bsg_destroy_job(struct bsg_job *job) | |
35 | +{ | |
36 | + put_device(job->dev); /* release reference for the request */ | |
37 | + | |
38 | + kfree(job->request_payload.sg_list); | |
39 | + kfree(job->reply_payload.sg_list); | |
40 | + kfree(job); | |
41 | +} | |
42 | + | |
/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result (0 on success, negative errno on failure)
 * @reply_payload_rcv_len: length of payload recvd
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		unsigned int reply_payload_rcv_len)
{
	struct request *req = job->req;
	struct request *rsp = req->next_rq;	/* non-NULL only for bidi requests */
	int err;

	err = job->req->errors = result;
	if (err < 0)
		/* we're only returning the result field in the reply */
		job->req->sense_len = sizeof(u32);
	else
		job->req->sense_len = job->reply_len;
	/* we assume all request payload was transferred, residual == 0 */
	req->resid_len = 0;

	if (rsp) {
		WARN_ON(reply_payload_rcv_len > rsp->resid_len);

		/* set reply (bidi) residual */
		rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
	}
	/*
	 * Defer the final end-of-request and job teardown to softirq
	 * context; bsg_setup_queue() installed bsg_softirq_done for this.
	 */
	blk_complete_request(req);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
76 | + | |
77 | +/** | |
78 | + * bsg_softirq_done - softirq done routine for destroying the bsg requests | |
79 | + * @rq: BSG request that holds the job to be destroyed | |
80 | + */ | |
81 | +static void bsg_softirq_done(struct request *rq) | |
82 | +{ | |
83 | + struct bsg_job *job = rq->special; | |
84 | + | |
85 | + blk_end_request_all(rq, rq->errors); | |
86 | + bsg_destroy_job(job); | |
87 | +} | |
88 | + | |
89 | +static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) | |
90 | +{ | |
91 | + size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); | |
92 | + | |
93 | + BUG_ON(!req->nr_phys_segments); | |
94 | + | |
95 | + buf->sg_list = kzalloc(sz, GFP_KERNEL); | |
96 | + if (!buf->sg_list) | |
97 | + return -ENOMEM; | |
98 | + sg_init_table(buf->sg_list, req->nr_phys_segments); | |
99 | + buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); | |
100 | + buf->payload_len = blk_rq_bytes(req); | |
101 | + return 0; | |
102 | +} | |
103 | + | |
104 | +/** | |
105 | + * bsg_create_job - create the bsg_job structure for the bsg request | |
106 | + * @dev: device that is being sent the bsg request | |
107 | + * @req: BSG request that needs a job structure | |
108 | + */ | |
109 | +static int bsg_create_job(struct device *dev, struct request *req) | |
110 | +{ | |
111 | + struct request *rsp = req->next_rq; | |
112 | + struct request_queue *q = req->q; | |
113 | + struct bsg_job *job; | |
114 | + int ret; | |
115 | + | |
116 | + BUG_ON(req->special); | |
117 | + | |
118 | + job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); | |
119 | + if (!job) | |
120 | + return -ENOMEM; | |
121 | + | |
122 | + req->special = job; | |
123 | + job->req = req; | |
124 | + if (q->bsg_job_size) | |
125 | + job->dd_data = (void *)&job[1]; | |
126 | + job->request = req->cmd; | |
127 | + job->request_len = req->cmd_len; | |
128 | + job->reply = req->sense; | |
129 | + job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer | |
130 | + * allocated */ | |
131 | + if (req->bio) { | |
132 | + ret = bsg_map_buffer(&job->request_payload, req); | |
133 | + if (ret) | |
134 | + goto failjob_rls_job; | |
135 | + } | |
136 | + if (rsp && rsp->bio) { | |
137 | + ret = bsg_map_buffer(&job->reply_payload, rsp); | |
138 | + if (ret) | |
139 | + goto failjob_rls_rqst_payload; | |
140 | + } | |
141 | + job->dev = dev; | |
142 | + /* take a reference for the request */ | |
143 | + get_device(job->dev); | |
144 | + return 0; | |
145 | + | |
146 | +failjob_rls_rqst_payload: | |
147 | + kfree(job->request_payload.sg_list); | |
148 | +failjob_rls_job: | |
149 | + kfree(job); | |
150 | + return -ENOMEM; | |
151 | +} | |
152 | + | |
/*
 * bsg_goose_queue - restart queue in case it was stopped
 * @q: request q to be restarted
 */
void bsg_goose_queue(struct request_queue *q)
{
	/* tolerate a NULL queue so callers need no guard of their own */
	if (q)
		blk_run_queue_async(q);
}
EXPORT_SYMBOL_GPL(bsg_goose_queue);
165 | + | |
/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error the create_bsg_job function should return a -Exyz error value
 * that will be set to the req->errors.
 *
 * Drivers/subsys should pass this to the queue init function.
 *
 * Entered with q->queue_lock held (the lock is dropped while each job is
 * built and handed to the LLD, and re-taken before looking at the queue
 * again, as the unlock/lock pairs below show).
 */
void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	/* bail out if the parent device reference can no longer be taken */
	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		/* drop the queue lock while building and dispatching the job */
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		if (ret) {
			/* could not build a job - fail this request and go on */
			req->errors = ret;
			blk_end_request_all(req, ret);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		spin_lock_irq(q->queue_lock);
		/* a non-zero LLD return stops further dispatching */
		if (ret)
			break;
	}

	/* NOTE(review): lock is dropped around put_device() — presumably the
	 * release path may sleep; confirm against the device model docs */
	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(bsg_request_fn);
211 | + | |
212 | +/** | |
213 | + * bsg_setup_queue - Create and add the bsg hooks so we can receive requests | |
214 | + * @dev: device to attach bsg device to | |
215 | + * @q: request queue setup by caller | |
216 | + * @name: device to give bsg device | |
217 | + * @job_fn: bsg job handler | |
218 | + * @dd_job_size: size of LLD data needed for each job | |
219 | + * | |
220 | + * The caller should have setup the reuqest queue with bsg_request_fn | |
221 | + * as the request_fn. | |
222 | + */ | |
223 | +int bsg_setup_queue(struct device *dev, struct request_queue *q, | |
224 | + char *name, bsg_job_fn *job_fn, int dd_job_size) | |
225 | +{ | |
226 | + int ret; | |
227 | + | |
228 | + q->queuedata = dev; | |
229 | + q->bsg_job_size = dd_job_size; | |
230 | + q->bsg_job_fn = job_fn; | |
231 | + queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); | |
232 | + blk_queue_softirq_done(q, bsg_softirq_done); | |
233 | + blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); | |
234 | + | |
235 | + ret = bsg_register_queue(q, dev, name, NULL); | |
236 | + if (ret) { | |
237 | + printk(KERN_ERR "%s: bsg interface failed to " | |
238 | + "initialize - register queue\n", dev->kobj.name); | |
239 | + return ret; | |
240 | + } | |
241 | + | |
242 | + return 0; | |
243 | +} | |
244 | +EXPORT_SYMBOL_GPL(bsg_setup_queue); | |
245 | + | |
/**
 * bsg_remove_queue - Deletes the bsg dev from the q
 * @q: the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue empty any requests that are blocked
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req; /* block request */
	int counts; /* totals for request_list count and starved */

	if (!q)
		return;

	/* Stop taking in new requests */
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	/* drain all requests in the queue */
	while (1) {
		/* need the lock to fetch a request
		 * this may fetch the same request as the previous pass
		 */
		req = blk_fetch_request(q);
		/* save requests in use and starved */
		counts = q->rq.count[0] + q->rq.count[1] +
			 q->rq.starved[0] + q->rq.starved[1];
		spin_unlock_irq(q->queue_lock);
		/* any requests still outstanding? */
		if (counts == 0)
			break;

		/* This may be the same req as the previous iteration,
		 * always send the blk_end_request_all after a prefetch.
		 * It is not okay to not end the request because the
		 * prefetch started the request.
		 */
		if (req) {
			/* return -ENXIO to indicate that this queue is
			 * going away
			 */
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		msleep(200); /* allow bsg to possibly finish */
		spin_lock_irq(q->queue_lock);
	}
	/* queue is drained - safe to remove the bsg class device */
	bsg_unregister_queue(q);
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);
include/linux/blkdev.h
... | ... | @@ -30,6 +30,7 @@ |
30 | 30 | struct blk_trace; |
31 | 31 | struct request; |
32 | 32 | struct sg_io_hdr; |
33 | +struct bsg_job; | |
33 | 34 | |
34 | 35 | #define BLKDEV_MIN_RQ 4 |
35 | 36 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
... | ... | @@ -209,6 +210,7 @@ |
209 | 210 | typedef void (softirq_done_fn)(struct request *); |
210 | 211 | typedef int (dma_drain_needed_fn)(struct request *); |
211 | 212 | typedef int (lld_busy_fn) (struct request_queue *q); |
213 | +typedef int (bsg_job_fn) (struct bsg_job *); | |
212 | 214 | |
213 | 215 | enum blk_eh_timer_return { |
214 | 216 | BLK_EH_NOT_HANDLED, |
... | ... | @@ -375,6 +377,8 @@ |
375 | 377 | struct mutex sysfs_lock; |
376 | 378 | |
377 | 379 | #if defined(CONFIG_BLK_DEV_BSG) |
380 | + bsg_job_fn *bsg_job_fn; | |
381 | + int bsg_job_size; | |
378 | 382 | struct bsg_class_device bsg_dev; |
379 | 383 | #endif |
380 | 384 |
include/linux/bsg-lib.h
1 | +/* | |
2 | + * BSG helper library | |
3 | + * | |
4 | + * Copyright (C) 2008 James Smart, Emulex Corporation | |
5 | + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. | |
6 | + * Copyright (C) 2011 Mike Christie | |
7 | + * | |
8 | + * This program is free software; you can redistribute it and/or modify | |
9 | + * it under the terms of the GNU General Public License as published by | |
10 | + * the Free Software Foundation; either version 2 of the License, or | |
11 | + * (at your option) any later version. | |
12 | + * | |
13 | + * This program is distributed in the hope that it will be useful, | |
14 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | + * GNU General Public License for more details. | |
17 | + * | |
18 | + * You should have received a copy of the GNU General Public License | |
19 | + * along with this program; if not, write to the Free Software | |
20 | + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
21 | + * | |
22 | + */ | |
23 | +#ifndef _BLK_BSG_ | |
24 | +#define _BLK_BSG_ | |
25 | + | |
26 | +#include <linux/blkdev.h> | |
27 | + | |
28 | +struct request; | |
29 | +struct device; | |
30 | +struct scatterlist; | |
31 | +struct request_queue; | |
32 | + | |
/* DMA payload for one direction of a bsg request */
struct bsg_buffer {
	unsigned int payload_len;	/* total byte length (blk_rq_bytes) */
	int sg_cnt;			/* entries mapped by blk_rq_map_sg */
	struct scatterlist *sg_list;	/* scatterlist covering the payload */
};
38 | + | |
/*
 * A single in-flight bsg request as seen by the LLD.  Built by the bsg
 * library around a struct request; freed again when the LLD completes
 * the job via bsg_job_done().
 */
struct bsg_job {
	struct device *dev;	/* device the request is sent to; a reference
				 * is held for the lifetime of the job */
	struct request *req;	/* the underlying block-layer request */

	/* Transport/driver specific request/reply structs */
	void *request;		/* points at req->cmd */
	void *reply;		/* points at req->sense */

	unsigned int request_len;
	unsigned int reply_len;
	/*
	 * On entry : reply_len indicates the buffer size allocated for
	 * the reply.
	 *
	 * Upon completion : the message handler must set reply_len
	 * to indicates the size of the reply to be returned to the
	 * caller.
	 */

	/* DMA payloads for the request/response */
	struct bsg_buffer request_payload;
	struct bsg_buffer reply_payload;

	void *dd_data;		/* Used for driver-specific storage */
};
64 | + | |
65 | +void bsg_job_done(struct bsg_job *job, int result, | |
66 | + unsigned int reply_payload_rcv_len); | |
67 | +int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, | |
68 | + bsg_job_fn *job_fn, int dd_job_size); | |
69 | +void bsg_request_fn(struct request_queue *q); | |
70 | +void bsg_remove_queue(struct request_queue *q); | |
71 | +void bsg_goose_queue(struct request_queue *q); | |
72 | + | |
73 | +#endif |