Commit f06d9a2b52e246a66b606130cea3f0d7b7be17a7
Committed by Jens Axboe
1 parent 40cbbb781d
block: replace end_request() with [__]blk_end_request_cur()
end_request() has been kept around for backward compatibility; however,
it's about time for it to go away.

* There aren't too many users left.

* Its use of @uptodate is pretty confusing.

* In some cases, newer code ends up using a mixture of end_request()
  and [__]blk_end_request[_all](), which is way too confusing.

So, add [__]blk_end_request_cur() and replace end_request() with it.
Most conversions are straightforward.  Noteworthy ones are...

* paride/pcd: next_request() updated to take 0/-errno instead of 1/0.

* paride/pf: pf_end_request() and next_request() updated to take
  0/-errno instead of 1/0.

* xd: xd_readwrite() updated to return 0/-errno instead of 1/0.

* mtd/mtd_blkdevs: blktrans_discard_request() updated to return
  0/-errno instead of 1/0.  Unnecessary local variable res
  initialization removed from mtd_blktrans_thread().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Joerg Dorchain <joerg@dorchain.net>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Laurent Vivier <Laurent@lvivier.info>
Cc: Tim Waugh <tim@cyberelk.net>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: unsik Kim <donari75@gmail.com>
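For illustration, a minimal before/after sketch of the conversion
pattern applied throughout this patch (the function and device names
below are hypothetical; only the completion calls reflect the real
API change):

    /* Before: end_request() takes a 1/0 uptodate flag. */
    static void example_do_request_old(struct request *req)
    {
            if (req->sector >= example_capacity) {
                    end_request(req, 0);            /* 0 == failure */
                    return;
            }
            example_transfer(req);
            end_request(req, 1);                    /* 1 == success */
    }

    /* After: __blk_end_request_cur() takes 0/-errno (queue lock held). */
    static void example_do_request_new(struct request *req)
    {
            if (req->sector >= example_capacity) {
                    __blk_end_request_cur(req, -EIO);   /* failure */
                    return;
            }
            example_transfer(req);
            __blk_end_request_cur(req, 0);          /* success */
    }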
Showing 19 changed files with 128 additions and 126 deletions
- drivers/block/amiflop.c
- drivers/block/ataflop.c
- drivers/block/hd.c
- drivers/block/mg_disk.c
- drivers/block/paride/pcd.c
- drivers/block/paride/pd.c
- drivers/block/paride/pf.c
- drivers/block/ps3disk.c
- drivers/block/swim.c
- drivers/block/swim3.c
- drivers/block/xd.c
- drivers/block/xen-blkfront.c
- drivers/block/xsysace.c
- drivers/block/z2ram.c
- drivers/cdrom/gdrom.c
- drivers/message/i2o/i2o_block.c
- drivers/mtd/mtd_blkdevs.c
- drivers/sbus/char/jsflash.c
- include/linux/blkdev.h
drivers/block/amiflop.c
... | ... | @@ -1359,7 +1359,7 @@ |
1359 | 1359 | #endif |
1360 | 1360 | block = CURRENT->sector + cnt; |
1361 | 1361 | if ((int)block > floppy->blocks) { |
1362 | - end_request(CURRENT, 0); | |
1362 | + __blk_end_request_cur(CURRENT, -EIO); | |
1363 | 1363 | goto repeat; |
1364 | 1364 | } |
1365 | 1365 | |
... | ... | @@ -1373,11 +1373,11 @@ |
1373 | 1373 | |
1374 | 1374 | if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) { |
1375 | 1375 | printk(KERN_WARNING "do_fd_request: unknown command\n"); |
1376 | - end_request(CURRENT, 0); | |
1376 | + __blk_end_request_cur(CURRENT, -EIO); | |
1377 | 1377 | goto repeat; |
1378 | 1378 | } |
1379 | 1379 | if (get_track(drive, track) == -1) { |
1380 | - end_request(CURRENT, 0); | |
1380 | + __blk_end_request_cur(CURRENT, -EIO); | |
1381 | 1381 | goto repeat; |
1382 | 1382 | } |
1383 | 1383 | |
... | ... | @@ -1391,7 +1391,7 @@ |
1391 | 1391 | |
1392 | 1392 | /* keep the drive spinning while writes are scheduled */ |
1393 | 1393 | if (!fd_motor_on(drive)) { |
1394 | - end_request(CURRENT, 0); | |
1394 | + __blk_end_request_cur(CURRENT, -EIO); | |
1395 | 1395 | goto repeat; |
1396 | 1396 | } |
1397 | 1397 | /* |
... | ... | @@ -1410,7 +1410,7 @@ |
1410 | 1410 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; |
1411 | 1411 | CURRENT->sector += CURRENT->current_nr_sectors; |
1412 | 1412 | |
1413 | - end_request(CURRENT, 1); | |
1413 | + __blk_end_request_cur(CURRENT, 0); | |
1414 | 1414 | goto repeat; |
1415 | 1415 | } |
1416 | 1416 |
drivers/block/ataflop.c
... | ... | @@ -612,7 +612,7 @@ |
612 | 612 | CURRENT->errors++; |
613 | 613 | if (CURRENT->errors >= MAX_ERRORS) { |
614 | 614 | printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); |
615 | - end_request(CURRENT, 0); | |
615 | + __blk_end_request_cur(CURRENT, -EIO); | |
616 | 616 | } |
617 | 617 | else if (CURRENT->errors == RECALIBRATE_ERRORS) { |
618 | 618 | printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); |
... | ... | @@ -734,7 +734,7 @@ |
734 | 734 | /* all sectors finished */ |
735 | 735 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; |
736 | 736 | CURRENT->sector += CURRENT->current_nr_sectors; |
737 | - end_request(CURRENT, 1); | |
737 | + __blk_end_request_cur(CURRENT, 0); | |
738 | 738 | redo_fd_request(); |
739 | 739 | return; |
740 | 740 | } |
... | ... | @@ -1141,7 +1141,7 @@ |
1141 | 1141 | /* all sectors finished */ |
1142 | 1142 | CURRENT->nr_sectors -= CURRENT->current_nr_sectors; |
1143 | 1143 | CURRENT->sector += CURRENT->current_nr_sectors; |
1144 | - end_request(CURRENT, 1); | |
1144 | + __blk_end_request_cur(CURRENT, 0); | |
1145 | 1145 | redo_fd_request(); |
1146 | 1146 | } |
1147 | 1147 | return; |
... | ... | @@ -1414,7 +1414,7 @@ |
1414 | 1414 | if (!UD.connected) { |
1415 | 1415 | /* drive not connected */ |
1416 | 1416 | printk(KERN_ERR "Unknown Device: fd%d\n", drive ); |
1417 | - end_request(CURRENT, 0); | |
1417 | + __blk_end_request_cur(CURRENT, -EIO); | |
1418 | 1418 | goto repeat; |
1419 | 1419 | } |
1420 | 1420 | |
... | ... | @@ -1430,12 +1430,12 @@ |
1430 | 1430 | /* user supplied disk type */ |
1431 | 1431 | if (--type >= NUM_DISK_MINORS) { |
1432 | 1432 | printk(KERN_WARNING "fd%d: invalid disk format", drive ); |
1433 | - end_request(CURRENT, 0); | |
1433 | + __blk_end_request_cur(CURRENT, -EIO); | |
1434 | 1434 | goto repeat; |
1435 | 1435 | } |
1436 | 1436 | if (minor2disktype[type].drive_types > DriveType) { |
1437 | 1437 | printk(KERN_WARNING "fd%d: unsupported disk format", drive ); |
1438 | - end_request(CURRENT, 0); | |
1438 | + __blk_end_request_cur(CURRENT, -EIO); | |
1439 | 1439 | goto repeat; |
1440 | 1440 | } |
1441 | 1441 | type = minor2disktype[type].index; |
... | ... | @@ -1445,7 +1445,7 @@ |
1445 | 1445 | } |
1446 | 1446 | |
1447 | 1447 | if (CURRENT->sector + 1 > UDT->blocks) { |
1448 | - end_request(CURRENT, 0); | |
1448 | + __blk_end_request_cur(CURRENT, -EIO); | |
1449 | 1449 | goto repeat; |
1450 | 1450 | } |
1451 | 1451 |
drivers/block/hd.c
... | ... | @@ -410,7 +410,7 @@ |
410 | 410 | if (req != NULL) { |
411 | 411 | struct hd_i_struct *disk = req->rq_disk->private_data; |
412 | 412 | if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { |
413 | - end_request(req, 0); | |
413 | + __blk_end_request_cur(req, -EIO); | |
414 | 414 | disk->special_op = disk->recalibrate = 1; |
415 | 415 | } else if (req->errors % RESET_FREQ == 0) |
416 | 416 | reset = 1; |
... | ... | @@ -466,7 +466,7 @@ |
466 | 466 | req->buffer+512); |
467 | 467 | #endif |
468 | 468 | if (req->current_nr_sectors <= 0) |
469 | - end_request(req, 1); | |
469 | + __blk_end_request_cur(req, 0); | |
470 | 470 | if (i > 0) { |
471 | 471 | SET_HANDLER(&read_intr); |
472 | 472 | return; |
... | ... | @@ -505,7 +505,7 @@ |
505 | 505 | --req->current_nr_sectors; |
506 | 506 | req->buffer += 512; |
507 | 507 | if (!i || (req->bio && req->current_nr_sectors <= 0)) |
508 | - end_request(req, 1); | |
508 | + __blk_end_request_cur(req, 0); | |
509 | 509 | if (i > 0) { |
510 | 510 | SET_HANDLER(&write_intr); |
511 | 511 | outsw(HD_DATA, req->buffer, 256); |
... | ... | @@ -548,7 +548,7 @@ |
548 | 548 | #ifdef DEBUG |
549 | 549 | printk("%s: too many errors\n", name); |
550 | 550 | #endif |
551 | - end_request(CURRENT, 0); | |
551 | + __blk_end_request_cur(CURRENT, -EIO); | |
552 | 552 | } |
553 | 553 | hd_request(); |
554 | 554 | spin_unlock_irq(hd_queue->queue_lock); |
... | ... | @@ -563,7 +563,7 @@ |
563 | 563 | } |
564 | 564 | if (disk->head > 16) { |
565 | 565 | printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); |
566 | - end_request(req, 0); | |
566 | + __blk_end_request_cur(req, -EIO); | |
567 | 567 | } |
568 | 568 | disk->special_op = 0; |
569 | 569 | return 1; |
... | ... | @@ -607,7 +607,7 @@ |
607 | 607 | ((block+nsect) > get_capacity(req->rq_disk))) { |
608 | 608 | printk("%s: bad access: block=%d, count=%d\n", |
609 | 609 | req->rq_disk->disk_name, block, nsect); |
610 | - end_request(req, 0); | |
610 | + __blk_end_request_cur(req, -EIO); | |
611 | 611 | goto repeat; |
612 | 612 | } |
613 | 613 | |
... | ... | @@ -647,7 +647,7 @@ |
647 | 647 | break; |
648 | 648 | default: |
649 | 649 | printk("unknown hd-command\n"); |
650 | - end_request(req, 0); | |
650 | + __blk_end_request_cur(req, -EIO); | |
651 | 651 | break; |
652 | 652 | } |
653 | 653 | } |
drivers/block/mg_disk.c
... | ... | @@ -285,7 +285,7 @@ |
285 | 285 | if (req != NULL) |
286 | 286 | if (++req->errors >= MG_MAX_ERRORS || |
287 | 287 | host->error == MG_ERR_TIMEOUT) |
288 | - end_request(req, 0); | |
288 | + __blk_end_request_cur(req, -EIO); | |
289 | 289 | } |
290 | 290 | |
291 | 291 | static unsigned int mg_out(struct mg_host *host, |
... | ... | @@ -351,7 +351,7 @@ |
351 | 351 | |
352 | 352 | if (req->current_nr_sectors <= 0) { |
353 | 353 | MG_DBG("remain : %d sects\n", remains); |
354 | - end_request(req, 1); | |
354 | + __blk_end_request_cur(req, 0); | |
355 | 355 | if (remains > 0) |
356 | 356 | req = elv_next_request(host->breq); |
357 | 357 | } |
... | ... | @@ -395,7 +395,7 @@ |
395 | 395 | |
396 | 396 | if (req->current_nr_sectors <= 0) { |
397 | 397 | MG_DBG("remain : %d sects\n", remains); |
398 | - end_request(req, 1); | |
398 | + __blk_end_request_cur(req, 0); | |
399 | 399 | if (remains > 0) |
400 | 400 | req = elv_next_request(host->breq); |
401 | 401 | } |
... | ... | @@ -448,7 +448,7 @@ |
448 | 448 | |
449 | 449 | /* let know if current segment done */ |
450 | 450 | if (req->current_nr_sectors <= 0) |
451 | - end_request(req, 1); | |
451 | + __blk_end_request_cur(req, 0); | |
452 | 452 | |
453 | 453 | /* set handler if read remains */ |
454 | 454 | if (i > 0) { |
... | ... | @@ -497,7 +497,7 @@ |
497 | 497 | |
498 | 498 | /* let know if current segment or all done */ |
499 | 499 | if (!i || (req->bio && req->current_nr_sectors <= 0)) |
500 | - end_request(req, 1); | |
500 | + __blk_end_request_cur(req, 0); | |
501 | 501 | |
502 | 502 | /* write 1 sector and set handler if remains */ |
503 | 503 | if (i > 0) { |
... | ... | @@ -563,7 +563,7 @@ |
563 | 563 | default: |
564 | 564 | printk(KERN_WARNING "%s:%d unknown command\n", |
565 | 565 | __func__, __LINE__); |
566 | - end_request(req, 0); | |
566 | + __blk_end_request_cur(req, -EIO); | |
567 | 567 | break; |
568 | 568 | } |
569 | 569 | } |
... | ... | @@ -617,7 +617,7 @@ |
617 | 617 | default: |
618 | 618 | printk(KERN_WARNING "%s:%d unknown command\n", |
619 | 619 | __func__, __LINE__); |
620 | - end_request(req, 0); | |
620 | + __blk_end_request_cur(req, -EIO); | |
621 | 621 | break; |
622 | 622 | } |
623 | 623 | return MG_ERR_NONE; |
... | ... | @@ -655,7 +655,7 @@ |
655 | 655 | "%s: bad access: sector=%d, count=%d\n", |
656 | 656 | req->rq_disk->disk_name, |
657 | 657 | sect_num, sect_cnt); |
658 | - end_request(req, 0); | |
658 | + __blk_end_request_cur(req, -EIO); | |
659 | 659 | continue; |
660 | 660 | } |
661 | 661 |
drivers/block/paride/pcd.c
... | ... | @@ -735,16 +735,16 @@ |
735 | 735 | ps_set_intr(do_pcd_read, NULL, 0, nice); |
736 | 736 | return; |
737 | 737 | } else |
738 | - end_request(pcd_req, 0); | |
738 | + __blk_end_request_cur(pcd_req, -EIO); | |
739 | 739 | } |
740 | 740 | } |
741 | 741 | |
742 | -static inline void next_request(int success) | |
742 | +static inline void next_request(int err) | |
743 | 743 | { |
744 | 744 | unsigned long saved_flags; |
745 | 745 | |
746 | 746 | spin_lock_irqsave(&pcd_lock, saved_flags); |
747 | - end_request(pcd_req, success); | |
747 | + __blk_end_request_cur(pcd_req, err); | |
748 | 748 | pcd_busy = 0; |
749 | 749 | do_pcd_request(pcd_queue); |
750 | 750 | spin_unlock_irqrestore(&pcd_lock, saved_flags); |
... | ... | @@ -781,7 +781,7 @@ |
781 | 781 | |
782 | 782 | if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { |
783 | 783 | pcd_bufblk = -1; |
784 | - next_request(0); | |
784 | + next_request(-EIO); | |
785 | 785 | return; |
786 | 786 | } |
787 | 787 | |
... | ... | @@ -796,7 +796,7 @@ |
796 | 796 | pcd_retries = 0; |
797 | 797 | pcd_transfer(); |
798 | 798 | if (!pcd_count) { |
799 | - next_request(1); | |
799 | + next_request(0); | |
800 | 800 | return; |
801 | 801 | } |
802 | 802 | |
... | ... | @@ -815,7 +815,7 @@ |
815 | 815 | return; |
816 | 816 | } |
817 | 817 | pcd_bufblk = -1; |
818 | - next_request(0); | |
818 | + next_request(-EIO); | |
819 | 819 | return; |
820 | 820 | } |
821 | 821 |
drivers/block/paride/pd.c
... | ... | @@ -410,7 +410,8 @@ |
410 | 410 | pd_claimed = 0; |
411 | 411 | phase = NULL; |
412 | 412 | spin_lock_irqsave(&pd_lock, saved_flags); |
413 | - end_request(pd_req, res); | |
413 | + __blk_end_request_cur(pd_req, | |
414 | + res == Ok ? 0 : -EIO); | |
414 | 415 | pd_req = elv_next_request(pd_queue); |
415 | 416 | if (!pd_req) |
416 | 417 | stop = 1; |
... | ... | @@ -477,7 +478,7 @@ |
477 | 478 | if (pd_count) |
478 | 479 | return 0; |
479 | 480 | spin_lock_irqsave(&pd_lock, saved_flags); |
480 | - end_request(pd_req, 1); | |
481 | + __blk_end_request_cur(pd_req, 0); | |
481 | 482 | pd_count = pd_req->current_nr_sectors; |
482 | 483 | pd_buf = pd_req->buffer; |
483 | 484 | spin_unlock_irqrestore(&pd_lock, saved_flags); |
drivers/block/paride/pf.c
... | ... | @@ -750,10 +750,10 @@ |
750 | 750 | |
751 | 751 | static struct request_queue *pf_queue; |
752 | 752 | |
753 | -static void pf_end_request(int uptodate) | |
753 | +static void pf_end_request(int err) | |
754 | 754 | { |
755 | 755 | if (pf_req) { |
756 | - end_request(pf_req, uptodate); | |
756 | + __blk_end_request_cur(pf_req, err); | |
757 | 757 | pf_req = NULL; |
758 | 758 | } |
759 | 759 | } |
... | ... | @@ -773,7 +773,7 @@ |
773 | 773 | pf_count = pf_req->current_nr_sectors; |
774 | 774 | |
775 | 775 | if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { |
776 | - pf_end_request(0); | |
776 | + pf_end_request(-EIO); | |
777 | 777 | goto repeat; |
778 | 778 | } |
779 | 779 | |
... | ... | @@ -788,7 +788,7 @@ |
788 | 788 | pi_do_claimed(pf_current->pi, do_pf_write); |
789 | 789 | else { |
790 | 790 | pf_busy = 0; |
791 | - pf_end_request(0); | |
791 | + pf_end_request(-EIO); | |
792 | 792 | goto repeat; |
793 | 793 | } |
794 | 794 | } |
... | ... | @@ -805,7 +805,7 @@ |
805 | 805 | return 1; |
806 | 806 | if (!pf_count) { |
807 | 807 | spin_lock_irqsave(&pf_spin_lock, saved_flags); |
808 | - pf_end_request(1); | |
808 | + pf_end_request(0); | |
809 | 809 | pf_req = elv_next_request(pf_queue); |
810 | 810 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); |
811 | 811 | if (!pf_req) |
... | ... | @@ -816,12 +816,12 @@ |
816 | 816 | return 0; |
817 | 817 | } |
818 | 818 | |
819 | -static inline void next_request(int success) | |
819 | +static inline void next_request(int err) | |
820 | 820 | { |
821 | 821 | unsigned long saved_flags; |
822 | 822 | |
823 | 823 | spin_lock_irqsave(&pf_spin_lock, saved_flags); |
824 | - pf_end_request(success); | |
824 | + pf_end_request(err); | |
825 | 825 | pf_busy = 0; |
826 | 826 | do_pf_request(pf_queue); |
827 | 827 | spin_unlock_irqrestore(&pf_spin_lock, saved_flags); |
... | ... | @@ -844,7 +844,7 @@ |
844 | 844 | pi_do_claimed(pf_current->pi, do_pf_read_start); |
845 | 845 | return; |
846 | 846 | } |
847 | - next_request(0); | |
847 | + next_request(-EIO); | |
848 | 848 | return; |
849 | 849 | } |
850 | 850 | pf_mask = STAT_DRQ; |
... | ... | @@ -863,7 +863,7 @@ |
863 | 863 | pi_do_claimed(pf_current->pi, do_pf_read_start); |
864 | 864 | return; |
865 | 865 | } |
866 | - next_request(0); | |
866 | + next_request(-EIO); | |
867 | 867 | return; |
868 | 868 | } |
869 | 869 | pi_read_block(pf_current->pi, pf_buf, 512); |
... | ... | @@ -871,7 +871,7 @@ |
871 | 871 | break; |
872 | 872 | } |
873 | 873 | pi_disconnect(pf_current->pi); |
874 | - next_request(1); | |
874 | + next_request(0); | |
875 | 875 | } |
876 | 876 | |
877 | 877 | static void do_pf_write(void) |
... | ... | @@ -890,7 +890,7 @@ |
890 | 890 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
891 | 891 | return; |
892 | 892 | } |
893 | - next_request(0); | |
893 | + next_request(-EIO); | |
894 | 894 | return; |
895 | 895 | } |
896 | 896 | |
... | ... | @@ -903,7 +903,7 @@ |
903 | 903 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
904 | 904 | return; |
905 | 905 | } |
906 | - next_request(0); | |
906 | + next_request(-EIO); | |
907 | 907 | return; |
908 | 908 | } |
909 | 909 | pi_write_block(pf_current->pi, pf_buf, 512); |
... | ... | @@ -923,11 +923,11 @@ |
923 | 923 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
924 | 924 | return; |
925 | 925 | } |
926 | - next_request(0); | |
926 | + next_request(-EIO); | |
927 | 927 | return; |
928 | 928 | } |
929 | 929 | pi_disconnect(pf_current->pi); |
930 | - next_request(1); | |
930 | + next_request(0); | |
931 | 931 | } |
932 | 932 | |
933 | 933 | static int __init pf_init(void) |
drivers/block/ps3disk.c
... | ... | @@ -158,7 +158,7 @@ |
158 | 158 | if (res) { |
159 | 159 | dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, |
160 | 160 | __LINE__, op, res); |
161 | - end_request(req, 0); | |
161 | + __blk_end_request_cur(req, -EIO); | |
162 | 162 | return 0; |
163 | 163 | } |
164 | 164 | |
... | ... | @@ -180,7 +180,7 @@ |
180 | 180 | if (res) { |
181 | 181 | dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", |
182 | 182 | __func__, __LINE__, res); |
183 | - end_request(req, 0); | |
183 | + __blk_end_request_cur(req, -EIO); | |
184 | 184 | return 0; |
185 | 185 | } |
186 | 186 | |
... | ... | @@ -205,7 +205,7 @@ |
205 | 205 | break; |
206 | 206 | } else { |
207 | 207 | blk_dump_rq_flags(req, DEVICE_NAME " bad request"); |
208 | - end_request(req, 0); | |
208 | + __blk_end_request_cur(req, -EIO); | |
209 | 209 | continue; |
210 | 210 | } |
211 | 211 | } |
drivers/block/swim.c
... | ... | @@ -532,39 +532,39 @@ |
532 | 532 | |
533 | 533 | fs = req->rq_disk->private_data; |
534 | 534 | if (req->sector < 0 || req->sector >= fs->total_secs) { |
535 | - end_request(req, 0); | |
535 | + __blk_end_request_cur(req, -EIO); | |
536 | 536 | continue; |
537 | 537 | } |
538 | 538 | if (req->current_nr_sectors == 0) { |
539 | - end_request(req, 1); | |
539 | + __blk_end_request_cur(req, 0); | |
540 | 540 | continue; |
541 | 541 | } |
542 | 542 | if (!fs->disk_in) { |
543 | - end_request(req, 0); | |
543 | + __blk_end_request_cur(req, -EIO); | |
544 | 544 | continue; |
545 | 545 | } |
546 | 546 | if (rq_data_dir(req) == WRITE) { |
547 | 547 | if (fs->write_protected) { |
548 | - end_request(req, 0); | |
548 | + __blk_end_request_cur(req, -EIO); | |
549 | 549 | continue; |
550 | 550 | } |
551 | 551 | } |
552 | 552 | switch (rq_data_dir(req)) { |
553 | 553 | case WRITE: |
554 | 554 | /* NOT IMPLEMENTED */ |
555 | - end_request(req, 0); | |
555 | + __blk_end_request_cur(req, -EIO); | |
556 | 556 | break; |
557 | 557 | case READ: |
558 | 558 | if (floppy_read_sectors(fs, req->sector, |
559 | 559 | req->current_nr_sectors, |
560 | 560 | req->buffer)) { |
561 | - end_request(req, 0); | |
561 | + __blk_end_request_cur(req, -EIO); | |
562 | 562 | continue; |
563 | 563 | } |
564 | 564 | req->nr_sectors -= req->current_nr_sectors; |
565 | 565 | req->sector += req->current_nr_sectors; |
566 | 566 | req->buffer += req->current_nr_sectors * 512; |
567 | - end_request(req, 1); | |
567 | + __blk_end_request_cur(req, 0); | |
568 | 568 | break; |
569 | 569 | } |
570 | 570 | } |
drivers/block/swim3.c
... | ... | @@ -320,15 +320,15 @@ |
320 | 320 | #endif |
321 | 321 | |
322 | 322 | if (req->sector < 0 || req->sector >= fs->total_secs) { |
323 | - end_request(req, 0); | |
323 | + __blk_end_request_cur(req, -EIO); | |
324 | 324 | continue; |
325 | 325 | } |
326 | 326 | if (req->current_nr_sectors == 0) { |
327 | - end_request(req, 1); | |
327 | + __blk_end_request_cur(req, 0); | |
328 | 328 | continue; |
329 | 329 | } |
330 | 330 | if (fs->ejected) { |
331 | - end_request(req, 0); | |
331 | + __blk_end_request_cur(req, -EIO); | |
332 | 332 | continue; |
333 | 333 | } |
334 | 334 | |
... | ... | @@ -336,7 +336,7 @@ |
336 | 336 | if (fs->write_prot < 0) |
337 | 337 | fs->write_prot = swim3_readbit(fs, WRITE_PROT); |
338 | 338 | if (fs->write_prot) { |
339 | - end_request(req, 0); | |
339 | + __blk_end_request_cur(req, -EIO); | |
340 | 340 | continue; |
341 | 341 | } |
342 | 342 | } |
... | ... | @@ -508,7 +508,7 @@ |
508 | 508 | case do_transfer: |
509 | 509 | if (fs->cur_cyl != fs->req_cyl) { |
510 | 510 | if (fs->retries > 5) { |
511 | - end_request(fd_req, 0); | |
511 | + __blk_end_request_cur(fd_req, -EIO); | |
512 | 512 | fs->state = idle; |
513 | 513 | return; |
514 | 514 | } |
... | ... | @@ -540,7 +540,7 @@ |
540 | 540 | out_8(&sw->intr_enable, 0); |
541 | 541 | fs->cur_cyl = -1; |
542 | 542 | if (fs->retries > 5) { |
543 | - end_request(fd_req, 0); | |
543 | + __blk_end_request_cur(fd_req, -EIO); | |
544 | 544 | fs->state = idle; |
545 | 545 | start_request(fs); |
546 | 546 | } else { |
... | ... | @@ -559,7 +559,7 @@ |
559 | 559 | out_8(&sw->select, RELAX); |
560 | 560 | out_8(&sw->intr_enable, 0); |
561 | 561 | printk(KERN_ERR "swim3: seek timeout\n"); |
562 | - end_request(fd_req, 0); | |
562 | + __blk_end_request_cur(fd_req, -EIO); | |
563 | 563 | fs->state = idle; |
564 | 564 | start_request(fs); |
565 | 565 | } |
... | ... | @@ -583,7 +583,7 @@ |
583 | 583 | return; |
584 | 584 | } |
585 | 585 | printk(KERN_ERR "swim3: seek settle timeout\n"); |
586 | - end_request(fd_req, 0); | |
586 | + __blk_end_request_cur(fd_req, -EIO); | |
587 | 587 | fs->state = idle; |
588 | 588 | start_request(fs); |
589 | 589 | } |
... | ... | @@ -615,7 +615,7 @@ |
615 | 615 | fd_req->current_nr_sectors -= s; |
616 | 616 | printk(KERN_ERR "swim3: timeout %sing sector %ld\n", |
617 | 617 | (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); |
618 | - end_request(fd_req, 0); | |
618 | + __blk_end_request_cur(fd_req, -EIO); | |
619 | 619 | fs->state = idle; |
620 | 620 | start_request(fs); |
621 | 621 | } |
... | ... | @@ -646,7 +646,7 @@ |
646 | 646 | printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); |
647 | 647 | fs->cur_cyl = -1; |
648 | 648 | if (fs->retries > 5) { |
649 | - end_request(fd_req, 0); | |
649 | + __blk_end_request_cur(fd_req, -EIO); | |
650 | 650 | fs->state = idle; |
651 | 651 | start_request(fs); |
652 | 652 | } else { |
... | ... | @@ -731,7 +731,7 @@ |
731 | 731 | printk("swim3: error %sing block %ld (err=%x)\n", |
732 | 732 | rq_data_dir(fd_req) == WRITE? "writ": "read", |
733 | 733 | (long)fd_req->sector, err); |
734 | - end_request(fd_req, 0); | |
734 | + __blk_end_request_cur(fd_req, -EIO); | |
735 | 735 | fs->state = idle; |
736 | 736 | } |
737 | 737 | } else { |
... | ... | @@ -740,7 +740,7 @@ |
740 | 740 | printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); |
741 | 741 | printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", |
742 | 742 | fs->state, rq_data_dir(fd_req), intr, err); |
743 | - end_request(fd_req, 0); | |
743 | + __blk_end_request_cur(fd_req, -EIO); | |
744 | 744 | fs->state = idle; |
745 | 745 | start_request(fs); |
746 | 746 | break; |
... | ... | @@ -749,7 +749,7 @@ |
749 | 749 | fd_req->current_nr_sectors -= fs->scount; |
750 | 750 | fd_req->buffer += fs->scount * 512; |
751 | 751 | if (fd_req->current_nr_sectors <= 0) { |
752 | - end_request(fd_req, 1); | |
752 | + __blk_end_request_cur(fd_req, 0); | |
753 | 753 | fs->state = idle; |
754 | 754 | } else { |
755 | 755 | fs->req_sector += fs->scount; |
drivers/block/xd.c
... | ... | @@ -314,21 +314,22 @@ |
314 | 314 | int retry; |
315 | 315 | |
316 | 316 | if (!blk_fs_request(req)) { |
317 | - end_request(req, 0); | |
317 | + __blk_end_request_cur(req, -EIO); | |
318 | 318 | continue; |
319 | 319 | } |
320 | 320 | if (block + count > get_capacity(req->rq_disk)) { |
321 | - end_request(req, 0); | |
321 | + __blk_end_request_cur(req, -EIO); | |
322 | 322 | continue; |
323 | 323 | } |
324 | 324 | if (rw != READ && rw != WRITE) { |
325 | 325 | printk("do_xd_request: unknown request\n"); |
326 | - end_request(req, 0); | |
326 | + __blk_end_request_cur(req, -EIO); | |
327 | 327 | continue; |
328 | 328 | } |
329 | 329 | for (retry = 0; (retry < XD_RETRIES) && !res; retry++) |
330 | 330 | res = xd_readwrite(rw, disk, req->buffer, block, count); |
331 | - end_request(req, res); /* wrap up, 0 = fail, 1 = success */ | |
331 | + /* wrap up, 0 = success, -errno = fail */ | |
332 | + __blk_end_request_cur(req, res); | |
332 | 333 | } |
333 | 334 | } |
334 | 335 | |
... | ... | @@ -418,7 +419,7 @@ |
418 | 419 | printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); |
419 | 420 | xd_recalibrate(drive); |
420 | 421 | spin_lock_irq(&xd_lock); |
421 | - return (0); | |
422 | + return -EIO; | |
422 | 423 | case 2: |
423 | 424 | if (sense[0] & 0x30) { |
424 | 425 | printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); |
... | ... | @@ -439,7 +440,7 @@ |
439 | 440 | else |
440 | 441 | printk(" - no valid disk address\n"); |
441 | 442 | spin_lock_irq(&xd_lock); |
442 | - return (0); | |
443 | + return -EIO; | |
443 | 444 | } |
444 | 445 | if (xd_dma_buffer) |
445 | 446 | for (i=0; i < (temp * 0x200); i++) |
... | ... | @@ -448,7 +449,7 @@ |
448 | 449 | count -= temp, buffer += temp * 0x200, block += temp; |
449 | 450 | } |
450 | 451 | spin_lock_irq(&xd_lock); |
451 | - return (1); | |
452 | + return 0; | |
452 | 453 | } |
453 | 454 | |
454 | 455 | /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ |
drivers/block/xen-blkfront.c
drivers/block/xsysace.c
... | ... | @@ -466,7 +466,7 @@ |
466 | 466 | while ((req = elv_next_request(q)) != NULL) { |
467 | 467 | if (blk_fs_request(req)) |
468 | 468 | break; |
469 | - end_request(req, 0); | |
469 | + __blk_end_request_cur(req, -EIO); | |
470 | 470 | } |
471 | 471 | return req; |
472 | 472 | } |
... | ... | @@ -494,7 +494,7 @@ |
494 | 494 | |
495 | 495 | /* Drop all pending requests */ |
496 | 496 | while ((req = elv_next_request(ace->queue)) != NULL) |
497 | - end_request(req, 0); | |
497 | + __blk_end_request_cur(req, -EIO); | |
498 | 498 | |
499 | 499 | /* Drop back to IDLE state and notify waiters */ |
500 | 500 | ace->fsm_state = ACE_FSM_STATE_IDLE; |
drivers/block/z2ram.c
... | ... | @@ -77,7 +77,7 @@ |
77 | 77 | if (start + len > z2ram_size) { |
78 | 78 | printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", |
79 | 79 | req->sector, req->current_nr_sectors); |
80 | - end_request(req, 0); | |
80 | + __blk_end_request_cur(req, -EIO); | |
81 | 81 | continue; |
82 | 82 | } |
83 | 83 | while (len) { |
... | ... | @@ -93,7 +93,7 @@ |
93 | 93 | start += size; |
94 | 94 | len -= size; |
95 | 95 | } |
96 | - end_request(req, 1); | |
96 | + __blk_end_request_cur(req, 0); | |
97 | 97 | } |
98 | 98 | } |
99 | 99 |
drivers/cdrom/gdrom.c
... | ... | @@ -654,17 +654,17 @@ |
654 | 654 | while ((req = elv_next_request(rq)) != NULL) { |
655 | 655 | if (!blk_fs_request(req)) { |
656 | 656 | printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); |
657 | - end_request(req, 0); | |
657 | + __blk_end_request_cur(req, -EIO); | |
658 | 658 | } |
659 | 659 | if (rq_data_dir(req) != READ) { |
660 | 660 | printk(KERN_NOTICE "GDROM: Read only device -"); |
661 | 661 | printk(" write request ignored\n"); |
662 | - end_request(req, 0); | |
662 | + __blk_end_request_cur(req, -EIO); | |
663 | 663 | } |
664 | 664 | if (req->nr_sectors) |
665 | 665 | gdrom_request_handler_dma(req); |
666 | 666 | else |
667 | - end_request(req, 0); | |
667 | + __blk_end_request_cur(req, -EIO); | |
668 | 668 | } |
669 | 669 | } |
670 | 670 |
drivers/message/i2o/i2o_block.c
drivers/mtd/mtd_blkdevs.c
... | ... | @@ -54,33 +54,33 @@ |
54 | 54 | |
55 | 55 | if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && |
56 | 56 | req->cmd[0] == REQ_LB_OP_DISCARD) |
57 | - return !tr->discard(dev, block, nsect); | |
57 | + return tr->discard(dev, block, nsect); | |
58 | 58 | |
59 | 59 | if (!blk_fs_request(req)) |
60 | - return 0; | |
60 | + return -EIO; | |
61 | 61 | |
62 | 62 | if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) |
63 | - return 0; | |
63 | + return -EIO; | |
64 | 64 | |
65 | 65 | switch(rq_data_dir(req)) { |
66 | 66 | case READ: |
67 | 67 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
68 | 68 | if (tr->readsect(dev, block, buf)) |
69 | - return 0; | |
70 | - return 1; | |
69 | + return -EIO; | |
70 | + return 0; | |
71 | 71 | |
72 | 72 | case WRITE: |
73 | 73 | if (!tr->writesect) |
74 | - return 0; | |
74 | + return -EIO; | |
75 | 75 | |
76 | 76 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
77 | 77 | if (tr->writesect(dev, block, buf)) |
78 | - return 0; | |
79 | - return 1; | |
78 | + return -EIO; | |
79 | + return 0; | |
80 | 80 | |
81 | 81 | default: |
82 | 82 | printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); |
83 | - return 0; | |
83 | + return -EIO; | |
84 | 84 | } |
85 | 85 | } |
86 | 86 | |
... | ... | @@ -96,7 +96,7 @@ |
96 | 96 | while (!kthread_should_stop()) { |
97 | 97 | struct request *req; |
98 | 98 | struct mtd_blktrans_dev *dev; |
99 | - int res = 0; | |
99 | + int res; | |
100 | 100 | |
101 | 101 | req = elv_next_request(rq); |
102 | 102 | |
... | ... | @@ -119,7 +119,7 @@ |
119 | 119 | |
120 | 120 | spin_lock_irq(rq->queue_lock); |
121 | 121 | |
122 | - end_request(req, res); | |
122 | + __blk_end_request_cur(req, res); | |
123 | 123 | } |
124 | 124 | spin_unlock_irq(rq->queue_lock); |
125 | 125 |
drivers/sbus/char/jsflash.c
... | ... | @@ -192,25 +192,25 @@ |
192 | 192 | size_t len = req->current_nr_sectors << 9; |
193 | 193 | |
194 | 194 | if ((offset + len) > jdp->dsize) { |
195 | - end_request(req, 0); | |
195 | + __blk_end_request_cur(req, -EIO); | |
196 | 196 | continue; |
197 | 197 | } |
198 | 198 | |
199 | 199 | if (rq_data_dir(req) != READ) { |
200 | 200 | printk(KERN_ERR "jsfd: write\n"); |
201 | - end_request(req, 0); | |
201 | + __blk_end_request_cur(req, -EIO); | |
202 | 202 | continue; |
203 | 203 | } |
204 | 204 | |
205 | 205 | if ((jdp->dbase & 0xff000000) != 0x20000000) { |
206 | 206 | printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); |
207 | - end_request(req, 0); | |
207 | + __blk_end_request_cur(req, -EIO); | |
208 | 208 | continue; |
209 | 209 | } |
210 | 210 | |
211 | 211 | jsfd_read(req->buffer, jdp->dbase + offset, len); |
212 | 212 | |
213 | - end_request(req, 1); | |
213 | + __blk_end_request_cur(req, 0); | |
214 | 214 | } |
215 | 215 | } |
216 | 216 |
include/linux/blkdev.h
... | ... | @@ -845,9 +845,8 @@ |
845 | 845 | * blk_update_request() completes given number of bytes and updates |
846 | 846 | * the request without completing it. |
847 | 847 | * |
848 | - * blk_end_request() and friends. __blk_end_request() and | |
849 | - * end_request() must be called with the request queue spinlock | |
850 | - * acquired. | |
848 | + * blk_end_request() and friends. __blk_end_request() must be called | |
849 | + * with the request queue spinlock acquired. | |
851 | 850 | * |
852 | 851 | * Several drivers define their own end_request and call |
853 | 852 | * blk_end_request() for parts of the original function. |
... | ... | @@ -899,6 +898,19 @@ |
899 | 898 | } |
900 | 899 | |
901 | 900 | /** |
901 | + * blk_end_request_cur - Helper function to finish the current request chunk. | |
902 | + * @rq: the request to finish the current chunk for | |
903 | + * @err: %0 for success, < %0 for error | |
904 | + * | |
905 | + * Description: | |
906 | + * Complete the current consecutively mapped chunk from @rq. | |
907 | + */ | |
908 | +static inline void blk_end_request_cur(struct request *rq, int error) | |
909 | +{ | |
910 | + blk_end_request(rq, error, rq->hard_cur_sectors << 9); | |
911 | +} | |
912 | + | |
913 | +/** | |
902 | 914 | * __blk_end_request - Helper function for drivers to complete the request. |
903 | 915 | * @rq: the request being processed |
904 | 916 | * @error: %0 for success, < %0 for error |
... | ... | @@ -934,29 +946,17 @@ |
934 | 946 | } |
935 | 947 | |
936 | 948 | /** |
937 | - * end_request - end I/O on the current segment of the request | |
938 | - * @rq: the request being processed | |
939 | - * @uptodate: error value or %0/%1 uptodate flag | |
949 | + * __blk_end_request_cur - Helper function to finish the current request chunk. | |
950 | + * @rq: the request to finish the current chunk for | |
951 | + * @err: %0 for success, < %0 for error | |
940 | 952 | * |
941 | 953 | * Description: |
942 | - * Ends I/O on the current segment of a request. If that is the only | |
943 | - * remaining segment, the request is also completed and freed. | |
944 | - * | |
945 | - * This is a remnant of how older block drivers handled I/O completions. | |
946 | - * Modern drivers typically end I/O on the full request in one go, unless | |
947 | - * they have a residual value to account for. For that case this function | |
948 | - * isn't really useful, unless the residual just happens to be the | |
949 | - * full current segment. In other words, don't use this function in new | |
950 | - * code. Use blk_end_request() or __blk_end_request() to end a request. | |
951 | - **/ | |
952 | -static inline void end_request(struct request *rq, int uptodate) | |
954 | + * Complete the current consecutively mapped chunk from @rq. Must | |
955 | + * be called with queue lock held. | |
956 | + */ | |
957 | +static inline void __blk_end_request_cur(struct request *rq, int error) | |
953 | 958 | { |
954 | - int error = 0; | |
955 | - | |
956 | - if (uptodate <= 0) | |
957 | - error = uptodate ? uptodate : -EIO; | |
958 | - | |
959 | - __blk_end_bidi_request(rq, error, rq->hard_cur_sectors << 9, 0); | |
959 | + __blk_end_request(rq, error, rq->hard_cur_sectors << 9); | |
960 | 960 | } |
961 | 961 | |
962 | 962 | extern void blk_complete_request(struct request *); |
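For reference, the removed end_request() body above shows how the old
1/0/-errno @uptodate argument was mapped onto an errno before the
current chunk was completed.  That mapping can be written as a small
helper (uptodate_to_errno() is illustrative only, not part of this
patch), which makes the mechanical call-site translation explicit:

    static inline int uptodate_to_errno(int uptodate)
    {
            if (uptodate > 0)
                    return 0;               /* 1 == success */
            /* 0 becomes -EIO; a negative value passes through as-is */
            return uptodate ? uptodate : -EIO;
    }

    /*
     * Hence every conversion in this patch follows one rule:
     *   end_request(rq, up)
     * becomes
     *   __blk_end_request_cur(rq, uptodate_to_errno(up))
     * with the constant cases folded: 1 -> 0, 0 -> -EIO.
     */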