Commit 40cbbb781d3eba5d6ac0860db078af490e5c7c6b
Committed by
Jens Axboe
1 parent
b243ddcbe9
block: implement and use [__]blk_end_request_all()
There are many [__]blk_end_request() call sites which call it with full request length and expect full completion. Many of them ensure that the request actually completes by doing BUG_ON() the return value, which is awkward and error-prone. This patch adds [__]blk_end_request_all() which takes @rq and @error and fully completes the request. BUG_ON() is added to ensure that this actually happens. Most conversions are simple but there are a few noteworthy ones. * cdrom/viocd: viocd_end_request() replaced with direct calls to __blk_end_request_all(). * s390/block/dasd: dasd_end_request() replaced with direct calls to __blk_end_request_all(). * s390/char/tape_block: tapeblock_end_request() replaced with direct calls to blk_end_request_all(). [ Impact: cleanup ] Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Russell King <rmk@arm.linux.org.uk> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Mike Miller <mike.miller@hp.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Jeff Garzik <jgarzik@pobox.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Jeremy Fitzhardinge <jeremy@xensource.com> Cc: Alex Dubov <oakad@yahoo.com> Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Showing 15 changed files with 58 additions and 73 deletions Side-by-side Diff
- arch/arm/plat-omap/mailbox.c
- block/blk-barrier.c
- block/blk-core.c
- block/elevator.c
- drivers/block/cpqarray.c
- drivers/block/sx8.c
- drivers/block/virtio_blk.c
- drivers/block/xen-blkfront.c
- drivers/cdrom/gdrom.c
- drivers/cdrom/viocd.c
- drivers/memstick/core/mspro_block.c
- drivers/s390/block/dasd.c
- drivers/s390/char/tape_block.c
- drivers/scsi/scsi_lib.c
- include/linux/blkdev.h
arch/arm/plat-omap/mailbox.c
... | ... | @@ -192,8 +192,7 @@ |
192 | 192 | } |
193 | 193 | |
194 | 194 | spin_lock(q->queue_lock); |
195 | - if (__blk_end_request(rq, 0, 0)) | |
196 | - BUG(); | |
195 | + __blk_end_request_all(rq, 0); | |
197 | 196 | spin_unlock(q->queue_lock); |
198 | 197 | } |
199 | 198 | } |
... | ... | @@ -224,10 +223,7 @@ |
224 | 223 | break; |
225 | 224 | |
226 | 225 | msg = (mbox_msg_t) rq->data; |
227 | - | |
228 | - if (blk_end_request(rq, 0, 0)) | |
229 | - BUG(); | |
230 | - | |
226 | + blk_end_request_all(rq, 0); | |
231 | 227 | mbox->rxq->callback((void *)msg); |
232 | 228 | } |
233 | 229 | } |
... | ... | @@ -337,8 +333,7 @@ |
337 | 333 | |
338 | 334 | *p = (mbox_msg_t) rq->data; |
339 | 335 | |
340 | - if (blk_end_request(rq, 0, 0)) | |
341 | - BUG(); | |
336 | + blk_end_request_all(rq, 0); | |
342 | 337 | |
343 | 338 | if (unlikely(mbox_seq_test(mbox, *p))) { |
344 | 339 | pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p); |
block/blk-barrier.c
... | ... | @@ -106,10 +106,7 @@ |
106 | 106 | */ |
107 | 107 | q->ordseq = 0; |
108 | 108 | rq = q->orig_bar_rq; |
109 | - | |
110 | - if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) | |
111 | - BUG(); | |
112 | - | |
109 | + __blk_end_request_all(rq, q->orderr); | |
113 | 110 | return true; |
114 | 111 | } |
115 | 112 | |
... | ... | @@ -252,9 +249,7 @@ |
252 | 249 | * with prejudice. |
253 | 250 | */ |
254 | 251 | elv_dequeue_request(q, rq); |
255 | - if (__blk_end_request(rq, -EOPNOTSUPP, | |
256 | - blk_rq_bytes(rq))) | |
257 | - BUG(); | |
252 | + __blk_end_request_all(rq, -EOPNOTSUPP); | |
258 | 253 | *rqp = NULL; |
259 | 254 | return false; |
260 | 255 | } |
block/blk-core.c
... | ... | @@ -1780,7 +1780,7 @@ |
1780 | 1780 | break; |
1781 | 1781 | } else if (ret == BLKPREP_KILL) { |
1782 | 1782 | rq->cmd_flags |= REQ_QUIET; |
1783 | - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); | |
1783 | + __blk_end_request_all(rq, -EIO); | |
1784 | 1784 | } else { |
1785 | 1785 | printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); |
1786 | 1786 | break; |
block/elevator.c
... | ... | @@ -810,7 +810,7 @@ |
810 | 810 | rq = list_entry_rq(q->queue_head.next); |
811 | 811 | rq->cmd_flags |= REQ_QUIET; |
812 | 812 | trace_block_rq_abort(q, rq); |
813 | - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); | |
813 | + __blk_end_request_all(rq, -EIO); | |
814 | 814 | } |
815 | 815 | } |
816 | 816 | EXPORT_SYMBOL(elv_abort_queue); |
drivers/block/cpqarray.c
drivers/block/sx8.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
... | ... | @@ -551,7 +551,6 @@ |
551 | 551 | |
552 | 552 | for (i = info->ring.rsp_cons; i != rp; i++) { |
553 | 553 | unsigned long id; |
554 | - int ret; | |
555 | 554 | |
556 | 555 | bret = RING_GET_RESPONSE(&info->ring, i); |
557 | 556 | id = bret->id; |
... | ... | @@ -578,8 +577,7 @@ |
578 | 577 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " |
579 | 578 | "request: %x\n", bret->status); |
580 | 579 | |
581 | - ret = __blk_end_request(req, error, blk_rq_bytes(req)); | |
582 | - BUG_ON(ret); | |
580 | + __blk_end_request_all(req, error); | |
583 | 581 | break; |
584 | 582 | default: |
585 | 583 | BUG(); |
drivers/cdrom/gdrom.c
... | ... | @@ -632,7 +632,7 @@ |
632 | 632 | * before handling ending the request */ |
633 | 633 | spin_lock(&gdrom_lock); |
634 | 634 | list_del_init(&req->queuelist); |
635 | - __blk_end_request(req, err, blk_rq_bytes(req)); | |
635 | + __blk_end_request_all(req, err); | |
636 | 636 | } |
637 | 637 | spin_unlock(&gdrom_lock); |
638 | 638 | kfree(read_command); |
drivers/cdrom/viocd.c
... | ... | @@ -291,23 +291,6 @@ |
291 | 291 | return 0; |
292 | 292 | } |
293 | 293 | |
294 | -static void viocd_end_request(struct request *req, int error) | |
295 | -{ | |
296 | - int nsectors = req->hard_nr_sectors; | |
297 | - | |
298 | - /* | |
299 | - * Make sure it's fully ended, and ensure that we process | |
300 | - * at least one sector. | |
301 | - */ | |
302 | - if (blk_pc_request(req)) | |
303 | - nsectors = (req->data_len + 511) >> 9; | |
304 | - if (!nsectors) | |
305 | - nsectors = 1; | |
306 | - | |
307 | - if (__blk_end_request(req, error, nsectors << 9)) | |
308 | - BUG(); | |
309 | -} | |
310 | - | |
311 | 294 | static int rwreq; |
312 | 295 | |
313 | 296 | static void do_viocd_request(struct request_queue *q) |
314 | 297 | |
... | ... | @@ -316,11 +299,11 @@ |
316 | 299 | |
317 | 300 | while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { |
318 | 301 | if (!blk_fs_request(req)) |
319 | - viocd_end_request(req, -EIO); | |
302 | + __blk_end_request_all(req, -EIO); | |
320 | 303 | else if (send_request(req) < 0) { |
321 | 304 | printk(VIOCD_KERN_WARNING |
322 | 305 | "unable to send message to OS/400!"); |
323 | - viocd_end_request(req, -EIO); | |
306 | + __blk_end_request_all(req, -EIO); | |
324 | 307 | } else |
325 | 308 | rwreq++; |
326 | 309 | } |
327 | 310 | |
... | ... | @@ -531,9 +514,9 @@ |
531 | 514 | "with rc %d:0x%04X: %s\n", |
532 | 515 | req, event->xRc, |
533 | 516 | bevent->sub_result, err->msg); |
534 | - viocd_end_request(req, -EIO); | |
517 | + __blk_end_request_all(req, -EIO); | |
535 | 518 | } else |
536 | - viocd_end_request(req, 0); | |
519 | + __blk_end_request_all(req, 0); | |
537 | 520 | |
538 | 521 | /* restart handling of incoming requests */ |
539 | 522 | spin_unlock_irqrestore(&viocd_reqlock, flags); |
drivers/memstick/core/mspro_block.c
drivers/s390/block/dasd.c
... | ... | @@ -1614,15 +1614,6 @@ |
1614 | 1614 | } |
1615 | 1615 | |
1616 | 1616 | /* |
1617 | - * posts the buffer_cache about a finalized request | |
1618 | - */ | |
1619 | -static inline void dasd_end_request(struct request *req, int error) | |
1620 | -{ | |
1621 | - if (__blk_end_request(req, error, blk_rq_bytes(req))) | |
1622 | - BUG(); | |
1623 | -} | |
1624 | - | |
1625 | -/* | |
1626 | 1617 | * Process finished error recovery ccw. |
1627 | 1618 | */ |
1628 | 1619 | static inline void __dasd_block_process_erp(struct dasd_block *block, |
... | ... | @@ -1676,7 +1667,7 @@ |
1676 | 1667 | "Rejecting write request %p", |
1677 | 1668 | req); |
1678 | 1669 | blkdev_dequeue_request(req); |
1679 | - dasd_end_request(req, -EIO); | |
1670 | + __blk_end_request_all(req, -EIO); | |
1680 | 1671 | continue; |
1681 | 1672 | } |
1682 | 1673 | cqr = basedev->discipline->build_cp(basedev, block, req); |
... | ... | @@ -1705,7 +1696,7 @@ |
1705 | 1696 | "on request %p", |
1706 | 1697 | PTR_ERR(cqr), req); |
1707 | 1698 | blkdev_dequeue_request(req); |
1708 | - dasd_end_request(req, -EIO); | |
1699 | + __blk_end_request_all(req, -EIO); | |
1709 | 1700 | continue; |
1710 | 1701 | } |
1711 | 1702 | /* |
... | ... | @@ -1731,7 +1722,7 @@ |
1731 | 1722 | status = cqr->block->base->discipline->free_cp(cqr, req); |
1732 | 1723 | if (status <= 0) |
1733 | 1724 | error = status ? status : -EIO; |
1734 | - dasd_end_request(req, error); | |
1725 | + __blk_end_request_all(req, error); | |
1735 | 1726 | } |
1736 | 1727 | |
1737 | 1728 | /* |
... | ... | @@ -2040,7 +2031,7 @@ |
2040 | 2031 | spin_lock_irq(&block->request_queue_lock); |
2041 | 2032 | while ((req = elv_next_request(block->request_queue))) { |
2042 | 2033 | blkdev_dequeue_request(req); |
2043 | - dasd_end_request(req, -EIO); | |
2034 | + __blk_end_request_all(req, -EIO); | |
2044 | 2035 | } |
2045 | 2036 | spin_unlock_irq(&block->request_queue_lock); |
2046 | 2037 | } |
drivers/s390/char/tape_block.c
... | ... | @@ -74,13 +74,6 @@ |
74 | 74 | * Post finished request. |
75 | 75 | */ |
76 | 76 | static void |
77 | -tapeblock_end_request(struct request *req, int error) | |
78 | -{ | |
79 | - if (blk_end_request(req, error, blk_rq_bytes(req))) | |
80 | - BUG(); | |
81 | -} | |
82 | - | |
83 | -static void | |
84 | 77 | __tapeblock_end_request(struct tape_request *ccw_req, void *data) |
85 | 78 | { |
86 | 79 | struct tape_device *device; |
... | ... | @@ -90,7 +83,7 @@ |
90 | 83 | |
91 | 84 | device = ccw_req->device; |
92 | 85 | req = (struct request *) data; |
93 | - tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO); | |
86 | + blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO); | |
94 | 87 | if (ccw_req->rc == 0) |
95 | 88 | /* Update position. */ |
96 | 89 | device->blk_data.block_position = |
... | ... | @@ -118,7 +111,7 @@ |
118 | 111 | ccw_req = device->discipline->bread(device, req); |
119 | 112 | if (IS_ERR(ccw_req)) { |
120 | 113 | DBF_EVENT(1, "TBLOCK: bread failed\n"); |
121 | - tapeblock_end_request(req, -EIO); | |
114 | + blk_end_request_all(req, -EIO); | |
122 | 115 | return PTR_ERR(ccw_req); |
123 | 116 | } |
124 | 117 | ccw_req->callback = __tapeblock_end_request; |
... | ... | @@ -131,7 +124,7 @@ |
131 | 124 | * Start/enqueueing failed. No retries in |
132 | 125 | * this case. |
133 | 126 | */ |
134 | - tapeblock_end_request(req, -EIO); | |
127 | + blk_end_request_all(req, -EIO); | |
135 | 128 | device->discipline->free_bread(ccw_req); |
136 | 129 | } |
137 | 130 | |
... | ... | @@ -177,7 +170,7 @@ |
177 | 170 | DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); |
178 | 171 | blkdev_dequeue_request(req); |
179 | 172 | spin_unlock_irq(&device->blk_data.request_queue_lock); |
180 | - tapeblock_end_request(req, -EIO); | |
173 | + blk_end_request_all(req, -EIO); | |
181 | 174 | spin_lock_irq(&device->blk_data.request_queue_lock); |
182 | 175 | continue; |
183 | 176 | } |
drivers/scsi/scsi_lib.c
include/linux/blkdev.h
... | ... | @@ -883,6 +883,22 @@ |
883 | 883 | } |
884 | 884 | |
885 | 885 | /** |
886 | + * blk_end_request_all - Helper function for drivers to finish the request. | 
887 | + * @rq: the request to finish | |
888 | + * @error: %0 for success, < %0 for error | 
889 | + * | |
890 | + * Description: | |
891 | + * Completely finish @rq. | |
892 | + */ | |
893 | +static inline void blk_end_request_all(struct request *rq, int error) | |
894 | +{ | |
895 | + bool pending; | |
896 | + | |
897 | + pending = blk_end_request(rq, error, blk_rq_bytes(rq)); | |
898 | + BUG_ON(pending); | |
899 | +} | |
900 | + | |
901 | +/** | |
886 | 902 | * __blk_end_request - Helper function for drivers to complete the request. |
887 | 903 | * @rq: the request being processed |
888 | 904 | * @error: %0 for success, < %0 for error |
... | ... | @@ -899,6 +915,22 @@ |
899 | 915 | unsigned int nr_bytes) |
900 | 916 | { |
901 | 917 | return __blk_end_bidi_request(rq, error, nr_bytes, 0); |
918 | +} | |
919 | + | |
920 | +/** | |
921 | + * __blk_end_request_all - Helper function for drivers to finish the request. | 
922 | + * @rq: the request to finish | |
923 | + * @error: %0 for success, < %0 for error | 
924 | + * | |
925 | + * Description: | |
926 | + * Completely finish @rq. Must be called with queue lock held. | |
927 | + */ | |
928 | +static inline void __blk_end_request_all(struct request *rq, int error) | |
929 | +{ | |
930 | + bool pending; | |
931 | + | |
932 | + pending = __blk_end_request(rq, error, blk_rq_bytes(rq)); | |
933 | + BUG_ON(pending); | |
902 | 934 | } |
903 | 935 | |
904 | 936 | /** |