Commit 70e36eceaf897da11aa0b4d82b46ca66e65a05f1
Committed by
Jens Axboe
1 parent
45977d0e87
Exists in
master
and in
20 other branches
bsg: replace SG v3 with SG v4
This patch replaces SG v3 in bsg with SG v4 (except for SG_IO). Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Showing 1 changed file with 121 additions and 77 deletions Side-by-side Diff
block/bsg.c
... | ... | @@ -103,8 +103,8 @@ |
103 | 103 | struct request *rq; |
104 | 104 | struct bio *bio; |
105 | 105 | int err; |
106 | - struct sg_io_hdr hdr; | |
107 | - struct sg_io_hdr __user *uhdr; | |
106 | + struct sg_io_v4 hdr; | |
107 | + struct sg_io_v4 __user *uhdr; | |
108 | 108 | char sense[SCSI_SENSE_BUFFERSIZE]; |
109 | 109 | }; |
110 | 110 | |
111 | 111 | |
112 | 112 | |
113 | 113 | |
114 | 114 | |
115 | 115 | |
116 | 116 | |
117 | 117 | |
118 | 118 | |
119 | 119 | |
120 | 120 | |
121 | 121 | |
122 | 122 | |
123 | 123 | |
124 | 124 | |
125 | 125 | |
... | ... | @@ -235,57 +235,82 @@ |
235 | 235 | return bc; |
236 | 236 | } |
237 | 237 | |
238 | +static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq, | |
239 | + struct sg_io_v4 *hdr, int has_write_perm) | |
240 | +{ | |
241 | + memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ | |
242 | + | |
243 | + if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request, | |
244 | + hdr->request_len)) | |
245 | + return -EFAULT; | |
246 | + if (blk_verify_command(rq->cmd, has_write_perm)) | |
247 | + return -EPERM; | |
248 | + | |
249 | + /* | |
250 | + * fill in request structure | |
251 | + */ | |
252 | + rq->cmd_len = hdr->request_len; | |
253 | + rq->cmd_type = REQ_TYPE_BLOCK_PC; | |
254 | + | |
255 | + rq->timeout = (hdr->timeout * HZ) / 1000; | |
256 | + if (!rq->timeout) | |
257 | + rq->timeout = q->sg_timeout; | |
258 | + if (!rq->timeout) | |
259 | + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; | |
260 | + | |
261 | + return 0; | |
262 | +} | |
263 | + | |
238 | 264 | /* |
239 | - * Check if sg_io_hdr from user is allowed and valid | |
265 | + * Check if sg_io_v4 from user is allowed and valid | |
240 | 266 | */ |
241 | 267 | static int |
242 | -bsg_validate_sghdr(request_queue_t *q, struct sg_io_hdr *hdr, int *rw) | |
268 | +bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw) | |
243 | 269 | { |
244 | - if (hdr->interface_id != 'S') | |
270 | + if (hdr->guard != 'Q') | |
245 | 271 | return -EINVAL; |
246 | - if (hdr->cmd_len > BLK_MAX_CDB) | |
272 | + if (hdr->request_len > BLK_MAX_CDB) | |
247 | 273 | return -EINVAL; |
248 | - if (hdr->dxfer_len > (q->max_sectors << 9)) | |
274 | + if (hdr->dout_xfer_len > (q->max_sectors << 9) || | |
275 | + hdr->din_xfer_len > (q->max_sectors << 9)) | |
249 | 276 | return -EIO; |
250 | 277 | |
278 | + /* not supported currently */ | |
279 | + if (hdr->protocol || hdr->subprotocol) | |
280 | + return -EINVAL; | |
281 | + | |
251 | 282 | /* |
252 | 283 | * looks sane, if no data then it should be fine from our POV |
253 | 284 | */ |
254 | - if (!hdr->dxfer_len) | |
285 | + if (!hdr->dout_xfer_len && !hdr->din_xfer_len) | |
255 | 286 | return 0; |
256 | 287 | |
257 | - switch (hdr->dxfer_direction) { | |
258 | - case SG_DXFER_TO_FROM_DEV: | |
259 | - case SG_DXFER_FROM_DEV: | |
260 | - *rw = READ; | |
261 | - break; | |
262 | - case SG_DXFER_TO_DEV: | |
263 | - *rw = WRITE; | |
264 | - break; | |
265 | - default: | |
266 | - return -EINVAL; | |
267 | - } | |
288 | + /* not supported currently */ | |
289 | + if (hdr->dout_xfer_len && hdr->din_xfer_len) | |
290 | + return -EINVAL; | |
268 | 291 | |
292 | + *rw = hdr->dout_xfer_len ? WRITE : READ; | |
293 | + | |
269 | 294 | return 0; |
270 | 295 | } |
271 | 296 | |
272 | 297 | /* |
273 | - * map sg_io_hdr to a request. for scatter-gather sg_io_hdr, we map | |
274 | - * each segment to a bio and string multiple bio's to the request | |
298 | + * map sg_io_v4 to a request. | |
275 | 299 | */ |
276 | 300 | static struct request * |
277 | -bsg_map_hdr(struct bsg_device *bd, int rw, struct sg_io_hdr *hdr) | |
301 | +bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr) | |
278 | 302 | { |
279 | 303 | request_queue_t *q = bd->queue; |
280 | - struct sg_iovec iov; | |
281 | - struct sg_iovec __user *u_iov; | |
282 | 304 | struct request *rq; |
283 | - int ret, i = 0; | |
305 | + int ret, rw; | |
306 | + unsigned int dxfer_len; | |
307 | + void *dxferp = NULL; | |
284 | 308 | |
285 | - dprintk("map hdr %p/%d/%d\n", hdr->dxferp, hdr->dxfer_len, | |
286 | - hdr->iovec_count); | |
309 | + dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, | |
310 | + hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, | |
311 | + hdr->din_xfer_len); | |
287 | 312 | |
288 | - ret = bsg_validate_sghdr(q, hdr, &rw); | |
313 | + ret = bsg_validate_sgv4_hdr(q, hdr, &rw); | |
289 | 314 | if (ret) |
290 | 315 | return ERR_PTR(ret); |
291 | 316 | |
292 | 317 | |
293 | 318 | |
294 | 319 | |
295 | 320 | |
... | ... | @@ -293,46 +318,31 @@ |
293 | 318 | * map scatter-gather elements separately and string them to request |
294 | 319 | */ |
295 | 320 | rq = blk_get_request(q, rw, GFP_KERNEL); |
296 | - ret = blk_fill_sghdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM, | |
297 | - &bd->flags)); | |
321 | + ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM, | |
322 | + &bd->flags)); | |
298 | 323 | if (ret) { |
299 | 324 | blk_put_request(rq); |
300 | 325 | return ERR_PTR(ret); |
301 | 326 | } |
302 | 327 | |
303 | - if (!hdr->iovec_count) { | |
304 | - ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); | |
305 | - if (ret) | |
306 | - goto out; | |
307 | - } | |
328 | + if (hdr->dout_xfer_len) { | |
329 | + dxfer_len = hdr->dout_xfer_len; | |
330 | + dxferp = (void*)(unsigned long)hdr->dout_xferp; | |
331 | + } else if (hdr->din_xfer_len) { | |
332 | + dxfer_len = hdr->din_xfer_len; | |
333 | + dxferp = (void*)(unsigned long)hdr->din_xferp; | |
334 | + } else | |
335 | + dxfer_len = 0; | |
308 | 336 | |
309 | - u_iov = hdr->dxferp; | |
310 | - for (ret = 0, i = 0; i < hdr->iovec_count; i++, u_iov++) { | |
311 | - if (copy_from_user(&iov, u_iov, sizeof(iov))) { | |
312 | - ret = -EFAULT; | |
313 | - break; | |
337 | + if (dxfer_len) { | |
338 | + ret = blk_rq_map_user(q, rq, dxferp, dxfer_len); | |
339 | + if (ret) { | |
340 | + dprintk("failed map at %d\n", ret); | |
341 | + blk_put_request(rq); | |
342 | + rq = ERR_PTR(ret); | |
314 | 343 | } |
315 | - | |
316 | - if (!iov.iov_len || !iov.iov_base) { | |
317 | - ret = -EINVAL; | |
318 | - break; | |
319 | - } | |
320 | - | |
321 | - ret = blk_rq_map_user(q, rq, iov.iov_base, iov.iov_len); | |
322 | - if (ret) | |
323 | - break; | |
324 | 344 | } |
325 | 345 | |
326 | - /* | |
327 | - * bugger, cleanup | |
328 | - */ | |
329 | - if (ret) { | |
330 | -out: | |
331 | - dprintk("failed map at %d: %d\n", i, ret); | |
332 | - blk_unmap_sghdr_rq(rq, hdr); | |
333 | - rq = ERR_PTR(ret); | |
334 | - } | |
335 | - | |
336 | 346 | return rq; |
337 | 347 | } |
338 | 348 | |
... | ... | @@ -346,7 +356,7 @@ |
346 | 356 | struct bsg_device *bd = bc->bd; |
347 | 357 | unsigned long flags; |
348 | 358 | |
349 | - dprintk("%s: finished rq %p bc %p, bio %p offset %d stat %d\n", | |
359 | + dprintk("%s: finished rq %p bc %p, bio %p offset %Zd stat %d\n", | |
350 | 360 | bd->name, rq, bc, bc->bio, bc - bd->cmd_map, uptodate); |
351 | 361 | |
352 | 362 | bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); |
... | ... | @@ -434,6 +444,42 @@ |
434 | 444 | return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE); |
435 | 445 | } |
436 | 446 | |
447 | +static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, | |
448 | + struct bio *bio) | |
449 | +{ | |
450 | + int ret = 0; | |
451 | + | |
452 | + dprintk("rq %p bio %p %u\n", rq, bio, rq->errors); | |
453 | + /* | |
454 | + * fill in all the output members | |
455 | + */ | |
456 | + hdr->device_status = status_byte(rq->errors); | |
457 | + hdr->transport_status = host_byte(rq->errors); | |
458 | + hdr->driver_status = driver_byte(rq->errors); | |
459 | + hdr->info = 0; | |
460 | + if (hdr->device_status || hdr->transport_status || hdr->driver_status) | |
461 | + hdr->info |= SG_INFO_CHECK; | |
462 | + hdr->din_resid = rq->data_len; | |
463 | + hdr->response_len = 0; | |
464 | + | |
465 | + if (rq->sense_len && hdr->response) { | |
466 | + int len = min((unsigned int) hdr->max_response_len, | |
467 | + rq->sense_len); | |
468 | + | |
469 | + ret = copy_to_user((void*)(unsigned long)hdr->response, | |
470 | + rq->sense, len); | |
471 | + if (!ret) | |
472 | + hdr->response_len = len; | |
473 | + else | |
474 | + ret = -EFAULT; | |
475 | + } | |
476 | + | |
477 | + blk_rq_unmap_user(bio); | |
478 | + blk_put_request(rq); | |
479 | + | |
480 | + return ret; | |
481 | +} | |
482 | + | |
437 | 483 | static int bsg_complete_all_commands(struct bsg_device *bd) |
438 | 484 | { |
439 | 485 | struct bsg_command *bc; |
... | ... | @@ -476,7 +522,7 @@ |
476 | 522 | break; |
477 | 523 | } |
478 | 524 | |
479 | - tret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio); | |
525 | + tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio); | |
480 | 526 | if (!ret) |
481 | 527 | ret = tret; |
482 | 528 | |
483 | 529 | |
... | ... | @@ -495,11 +541,11 @@ |
495 | 541 | struct bsg_command *bc; |
496 | 542 | int nr_commands, ret; |
497 | 543 | |
498 | - if (count % sizeof(struct sg_io_hdr)) | |
544 | + if (count % sizeof(struct sg_io_v4)) | |
499 | 545 | return -EINVAL; |
500 | 546 | |
501 | 547 | ret = 0; |
502 | - nr_commands = count / sizeof(struct sg_io_hdr); | |
548 | + nr_commands = count / sizeof(struct sg_io_v4); | |
503 | 549 | while (nr_commands) { |
504 | 550 | bc = get_bc(bd, iov); |
505 | 551 | if (IS_ERR(bc)) { |
... | ... | @@ -512,7 +558,7 @@ |
512 | 558 | * after completing the request. so do that here, |
513 | 559 | * bsg_complete_work() cannot do that for us |
514 | 560 | */ |
515 | - ret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio); | |
561 | + ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio); | |
516 | 562 | |
517 | 563 | if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr))) |
518 | 564 | ret = -EFAULT; |
... | ... | @@ -522,8 +568,8 @@ |
522 | 568 | if (ret) |
523 | 569 | break; |
524 | 570 | |
525 | - buf += sizeof(struct sg_io_hdr); | |
526 | - *bytes_read += sizeof(struct sg_io_hdr); | |
571 | + buf += sizeof(struct sg_io_v4); | |
572 | + *bytes_read += sizeof(struct sg_io_v4); | |
527 | 573 | nr_commands--; |
528 | 574 | } |
529 | 575 | |
530 | 576 | |
531 | 577 | |
... | ... | @@ -582,16 +628,15 @@ |
582 | 628 | struct request *rq; |
583 | 629 | int ret, nr_commands; |
584 | 630 | |
585 | - if (count % sizeof(struct sg_io_hdr)) | |
631 | + if (count % sizeof(struct sg_io_v4)) | |
586 | 632 | return -EINVAL; |
587 | 633 | |
588 | - nr_commands = count / sizeof(struct sg_io_hdr); | |
634 | + nr_commands = count / sizeof(struct sg_io_v4); | |
589 | 635 | rq = NULL; |
590 | 636 | bc = NULL; |
591 | 637 | ret = 0; |
592 | 638 | while (nr_commands) { |
593 | 639 | request_queue_t *q = bd->queue; |
594 | - int rw = READ; | |
595 | 640 | |
596 | 641 | bc = bsg_get_command(bd); |
597 | 642 | if (!bc) |
... | ... | @@ -602,7 +647,7 @@ |
602 | 647 | break; |
603 | 648 | } |
604 | 649 | |
605 | - bc->uhdr = (struct sg_io_hdr __user *) buf; | |
650 | + bc->uhdr = (struct sg_io_v4 __user *) buf; | |
606 | 651 | if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { |
607 | 652 | ret = -EFAULT; |
608 | 653 | break; |
... | ... | @@ -611,7 +656,7 @@ |
611 | 656 | /* |
612 | 657 | * get a request, fill in the blanks, and add to request queue |
613 | 658 | */ |
614 | - rq = bsg_map_hdr(bd, rw, &bc->hdr); | |
659 | + rq = bsg_map_hdr(bd, &bc->hdr); | |
615 | 660 | if (IS_ERR(rq)) { |
616 | 661 | ret = PTR_ERR(rq); |
617 | 662 | rq = NULL; |
618 | 663 | |
... | ... | @@ -622,12 +667,10 @@ |
622 | 667 | bc = NULL; |
623 | 668 | rq = NULL; |
624 | 669 | nr_commands--; |
625 | - buf += sizeof(struct sg_io_hdr); | |
626 | - *bytes_read += sizeof(struct sg_io_hdr); | |
670 | + buf += sizeof(struct sg_io_v4); | |
671 | + *bytes_read += sizeof(struct sg_io_v4); | |
627 | 672 | } |
628 | 673 | |
629 | - if (rq) | |
630 | - blk_unmap_sghdr_rq(rq, &bc->hdr); | |
631 | 674 | if (bc) |
632 | 675 | bsg_free_command(bc); |
633 | 676 | |
634 | 677 | |
... | ... | @@ -898,11 +941,12 @@ |
898 | 941 | case SG_GET_RESERVED_SIZE: |
899 | 942 | case SG_SET_RESERVED_SIZE: |
900 | 943 | case SG_EMULATED_HOST: |
901 | - case SG_IO: | |
902 | 944 | case SCSI_IOCTL_SEND_COMMAND: { |
903 | 945 | void __user *uarg = (void __user *) arg; |
904 | 946 | return scsi_cmd_ioctl(file, bd->disk, cmd, uarg); |
905 | 947 | } |
948 | + case SG_IO: | |
949 | + return -EINVAL; | |
906 | 950 | /* |
907 | 951 | * block device ioctls |
908 | 952 | */ |