block/bsg.c

/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
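
/*
 * Reserve a command slot under bd->lock before allocating the bsg_command
 * itself; the reservation is rolled back if the slab allocation fails.
 */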
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
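
/*
 * Wait for an in-flight command to complete: returns -ENODATA when nothing
 * is pending, -EAGAIN for non-blocking opens, 0 after sleeping.
 */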
static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
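
/*
 * Copy the CDB in from user space and fill in cmd_len, cmd_type and the
 * (clamped) timeout; CDBs longer than BLK_MAX_CDB get a separately
 * allocated buffer.
 */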
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then the bsg_unregister_queue will
	 * eventually be called and the class_dev was freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
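
/*
 * Pop the oldest completed command off the done list, if any.
 */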
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}
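
/*
 * Copy completion status, sense data and residual counts back into the
 * user-visible sg_io_v4 header, then unmap and release the request(s).
 */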
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
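
/*
 * Reap completed commands: each sg_io_v4-sized chunk of the read buffer
 * receives the updated header of one finished command.
 */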
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}
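
/*
 * Queue one new command per sg_io_v4 header in the write buffer; the
 * commands complete asynchronously and are reaped via read().
 */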
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}
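
/*
 * Called when the last reference to the bsg_class_device is dropped;
 * invokes the transport's release callback and drops the parent device
 * reference taken in bsg_register_queue().
 */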
static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}
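
/*
 * Drop a reference on the per-open bsg_device; the final put reaps all
 * outstanding commands and frees the device.
 */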
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
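
/*
 * SG_GET/SET_COMMAND_Q adjust the per-device queue depth, the common SCSI
 * sg ioctls are forwarded to scsi_cmd_ioctl(), and SG_IO issues a single
 * command synchronously via blk_execute_rq().
 */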
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
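
/*
 * Attach a bsg character device to a request queue: allocate a minor via
 * the idr, create the class device and link it from the queue's sysfs
 * directory.
 */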
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
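
/*
 * Module init: create the command slab and the "bsg" class, then reserve
 * the BSG_MAX_DEVS minor range and register the cdev.
 */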
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);