block/bsg.c
/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
	struct blk_scsi_cmd_filter *cmd_filter;
	mode_t *f_mode;
};

enum {
	BSG_F_BLOCK		= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_cmd_filter_verify_command(bd->cmd_filter, rq->cmd,
						  bd->f_mode))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}
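
/*
 * Note on the timeout conversion above: sg_io_v4 timeouts are given in
 * milliseconds, so e.g. a userspace value of 30000 becomes
 * (30000 * HZ) / 1000 == 30 * HZ jiffies, independent of the kernel's
 * HZ setting.
 */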

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
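
/*
 * Illustrative only (not part of the driver): the minimum a userspace
 * caller must put in an sg_io_v4 header to get past the validation
 * above -- the 'Q' guard, a known protocol/subprotocol pair, and
 * transfer lengths within the queue's limits.
 */
#if 0
	struct sg_io_v4 hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
#endif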

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->data_len;
		hdr->din_resid = rq->next_rq->data_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->data_len;
	else
		hdr->dout_resid = rq->data_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static void bsg_set_cmd_filter(struct bsg_device *bd,
			       struct file *file)
{
	struct inode *inode;
	struct gendisk *disk;

	if (!file)
		return;

	inode = file->f_dentry->d_inode;
	if (!inode)
		return;

	disk = inode->i_bdev->bd_disk;

	bd->cmd_filter = &disk->cmd_filter;
	bd->f_mode = &file->f_mode;
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_cmd_filter(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_cmd_filter(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written);
	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
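
/*
 * Usage sketch (illustrative, not part of the driver): the read/write
 * pair above gives an asynchronous interface -- queue commands with
 * write(), wait with poll(), reap completions with read().  "fd" is
 * assumed to be an open bsg node and "hdr" a filled-in sg_io_v4.
 */
#if 0
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		;	/* queue full or bad header */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		read(fd, &hdr, sizeof(hdr));	/* reap one completed command */
#endif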

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);
	bsg_set_cmd_filter(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);
	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	lock_kernel();
	bd = bsg_get_device(inode, file);
	unlock_kernel();

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
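
/*
 * Usage sketch (illustrative, not part of the driver): a minimal
 * synchronous SG_IO call from userspace.  The device node name is a
 * made-up example; the all-zero 6-byte CDB is TEST UNIT READY.
 */
#if 0
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY */
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (__u64)(unsigned long)cdb;
	hdr.timeout = 30000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
#endif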

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = parent->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create_drvdata(bsg_class, parent, dev, NULL,
					  "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
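
/*
 * Registration sketch (illustrative): a transport that owns a live
 * request_queue exposes it as a bsg node roughly like this; the SCSI
 * midlayer makes an equivalent call when a device is added.  "sdev"
 * is a hypothetical scsi_device here.
 */
#if 0
	ret = bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
				 NULL, NULL);
#endif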

static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");
device_initcall(bsg_init);