Blame view
block/bsg-lib.c
9.9 KB
a497ee34a
|
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
aa387cc89
|
2 3 4 5 6 7 |
/* * BSG helper library * * Copyright (C) 2008 James Smart, Emulex Corporation * Copyright (C) 2011 Red Hat, Inc. All rights reserved. * Copyright (C) 2011 Mike Christie |
aa387cc89
|
8 9 |
*/ #include <linux/slab.h> |
cd2f076f1
|
10 |
#include <linux/blk-mq.h> |
aa387cc89
|
11 12 13 |
#include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/bsg-lib.h> |
6adb1236b
|
14 |
#include <linux/export.h> |
aa387cc89
|
15 |
#include <scsi/scsi_cmnd.h> |
17cb960f2
|
16 17 18 |
#include <scsi/sg.h> #define uptr64(val) ((void __user *)(uintptr_t)(val)) |
1028e4b33
|
19 20 21 22 23 |
/*
 * Per-queue state allocated by bsg_setup_queue() and recovered from
 * q->tag_set via container_of() (see bsg_queue_rq(), bsg_timeout(),
 * bsg_remove_queue()).
 */
struct bsg_set {
	struct blk_mq_tag_set	tag_set;	/* blk-mq tag set for this bsg queue */
	bsg_job_fn		*job_fn;	/* LLD handler invoked for each job */
	bsg_timeout_fn		*timeout_fn;	/* optional; may be NULL (checked in bsg_timeout()) */
};
17cb960f2
|
24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
static int bsg_transport_check_proto(struct sg_io_v4 *hdr) { if (hdr->protocol != BSG_PROTOCOL_SCSI || hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; return 0; } static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, fmode_t mode) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); |
972248e91
|
38 |
int ret; |
17cb960f2
|
39 40 41 |
job->request_len = hdr->request_len; job->request = memdup_user(uptr64(hdr->request), hdr->request_len); |
972248e91
|
42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
if (IS_ERR(job->request)) return PTR_ERR(job->request); if (hdr->dout_xfer_len && hdr->din_xfer_len) { job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0); if (IS_ERR(job->bidi_rq)) { ret = PTR_ERR(job->bidi_rq); goto out; } ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, uptr64(hdr->din_xferp), hdr->din_xfer_len, GFP_KERNEL); if (ret) goto out_free_bidi_rq; job->bidi_bio = job->bidi_rq->bio; } else { job->bidi_rq = NULL; job->bidi_bio = NULL; } |
472554919
|
63 |
|
972248e91
|
64 65 66 67 68 69 70 71 |
return 0; out_free_bidi_rq: if (job->bidi_rq) blk_put_request(job->bidi_rq); out: kfree(job->request); return ret; |
17cb960f2
|
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
} static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); int ret = 0; /* * The assignments below don't make much sense, but are kept for * bug by bug backwards compatibility: */ hdr->device_status = job->result & 0xff; hdr->transport_status = host_byte(job->result); hdr->driver_status = driver_byte(job->result); hdr->info = 0; if (hdr->device_status || hdr->transport_status || hdr->driver_status) hdr->info |= SG_INFO_CHECK; hdr->response_len = 0; if (job->result < 0) { /* we're only returning the result field in the reply */ job->reply_len = sizeof(u32); ret = job->result; } if (job->reply_len && hdr->response) { int len = min(hdr->max_response_len, job->reply_len); if (copy_to_user(uptr64(hdr->response), job->reply, len)) ret = -EFAULT; else hdr->response_len = len; } /* we assume all request payload was transferred, residual == 0 */ hdr->dout_resid = 0; |
972248e91
|
108 |
if (job->bidi_rq) { |
17cb960f2
|
109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 |
unsigned int rsp_len = job->reply_payload.payload_len; if (WARN_ON(job->reply_payload_rcv_len > rsp_len)) hdr->din_resid = 0; else hdr->din_resid = rsp_len - job->reply_payload_rcv_len; } else { hdr->din_resid = 0; } return ret; } static void bsg_transport_free_rq(struct request *rq) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); |
972248e91
|
125 126 127 128 |
if (job->bidi_rq) { blk_rq_unmap_user(job->bidi_bio); blk_put_request(job->bidi_rq); } |
17cb960f2
|
129 130 131 132 133 134 135 136 137 |
kfree(job->request); } static const struct bsg_ops bsg_transport_ops = { .check_proto = bsg_transport_check_proto, .fill_hdr = bsg_transport_fill_hdr, .complete_rq = bsg_transport_complete_rq, .free_rq = bsg_transport_free_rq, }; |
aa387cc89
|
138 139 |
/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 *
 * kref release callback (see bsg_job_put()): drops the device reference
 * taken in bsg_prepare_job(), frees the scatterlists allocated by
 * bsg_map_buffer(), and completes the underlying blk-mq request.
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev); /* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}
fb6f7c8d8
|
155 156 |
/* Drop a reference on @job; the last put tears it down via bsg_teardown_job(). */
void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

/*
 * Take a reference on @job unless it is already being torn down.
 * Returns non-zero if the reference was obtained.
 */
int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);
aa387cc89
|
166 167 168 169 170 171 172 173 174 175 176 177 |
/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload recvd
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *rq = blk_mq_rq_from_pdu(job);

	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	/* hand completion to blk-mq unless fault-injection fakes a timeout */
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
EXPORT_SYMBOL_GPL(bsg_job_done);

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 *
 * Drops the reference taken by kref_init() in bsg_prepare_job(); the final
 * put ends the request in bsg_teardown_job().
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

/*
 * Allocate a scatterlist sized for @req's physical segments and map the
 * request's data into it, recording the sg count and total byte length in
 * @buf.  Returns 0 or -ENOMEM.  The sg_list is freed in
 * bsg_teardown_job() / bsg_prepare_job()'s error path.
 */
static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kmalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 *
 * Maps the request (and bidi reply, if present) payloads, takes a device
 * reference and initializes the job's kref.  Returns false (with
 * job->result = -ENOMEM) on failure, true on success.
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}
aa387cc89
|
244 |
/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the create_bsg_job function should return a -Exyz error value
 * that will be set to ->result.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;	/* set in bsg_setup_queue() */
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	/* bail out if the device is already going away */
	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	/* dispatch the job to the LLD; 0 means it was accepted */
	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	/* drop the lookup reference; the job holds its own (bsg_prepare_job) */
	put_device(dev);
	return sts;
}
aa387cc89
|
279 |
|
17cb960f2
|
280 |
/* called right after the request is allocated for the request_queue */ |
cd2f076f1
|
281 282 |
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) |
50b4d4855
|
283 284 |
{ struct bsg_job *job = blk_mq_rq_to_pdu(req); |
eab40cf33
|
285 |
|
cd2f076f1
|
286 |
job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); |
17cb960f2
|
287 |
if (!job->reply) |
eab40cf33
|
288 |
return -ENOMEM; |
eab40cf33
|
289 290 |
return 0; } |
17cb960f2
|
291 |
/* called right before the request is given to the request_queue user */ |
eab40cf33
|
292 293 294 |
static void bsg_initialize_rq(struct request *req) { struct bsg_job *job = blk_mq_rq_to_pdu(req); |
17cb960f2
|
295 |
void *reply = job->reply; |
eab40cf33
|
296 |
|
50b4d4855
|
297 |
memset(job, 0, sizeof(*job)); |
17cb960f2
|
298 299 |
job->reply = reply; job->reply_len = SCSI_SENSE_BUFFERSIZE; |
50b4d4855
|
300 |
job->dd_data = job + 1; |
50b4d4855
|
301 |
} |
cd2f076f1
|
302 303 |
/* Request teardown: free the reply buffer allocated in bsg_init_rq(). */
static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
			unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}
5e28b8d8a
|
309 310 311 |
/*
 * Undo bsg_setup_queue(): unregister the bsg device, tear down the queue,
 * then free the tag set and the bsg_set that owns it.  NULL @q is a no-op.
 */
void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(q);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);
cd2f076f1
|
322 323 |
/*
 * blk-mq timeout hook: delegate to the LLD's timeout handler if one was
 * registered via bsg_setup_queue(), otherwise treat the request as done.
 */
static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}

/* blk-mq operations shared by every bsg-lib queue. */
static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq		= bsg_queue_rq,
	.init_request		= bsg_init_rq,
	.exit_request		= bsg_exit_rq,
	.initialize_rq_fn	= bsg_initialize_rq,
	.complete		= bsg_complete,
	.timeout		= bsg_timeout,
};
aa387cc89
|
340 341 342 |
/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: device to give bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler function pointer
 * @dd_job_size: size of LLD data needed for each job
 *
 * Allocates a bsg_set, sets up a single-hw-queue blk-mq tag set and queue,
 * and registers the bsg character device.  Returns the new request_queue
 * on success or an ERR_PTR() on failure; callers undo this with
 * bsg_remove_queue().
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	/* pdu = generic bsg_job followed by dd_job_size bytes of LLD data */
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;	/* read back in bsg_queue_rq() */
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
	if (ret) {
		/* NOTE(review): format string appears to lack a trailing
		 * "\n" — confirm against upstream before changing. */
		printk(KERN_ERR "%s: bsg interface failed to "
		       "initialize - register queue ",
		       dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);