Commit 82124d60354846623a4b94af335717a5e142a074
Committed by
Jens Axboe
1 parent
32fab448e5
Exists in
master
and in
7 other branches
block: add request submission interface
This patch adds blk_insert_cloned_request(), a generic request submission interface for request stacking drivers. Request-based dm will use it to submit their clones to underlying devices. blk_rq_check_limits() is also added because it is possible that the lower queue has stronger limitations than the upper queue if multiple drivers are stacking at request-level. Not only for blk_insert_cloned_request()'s internal use, the function will be used by request-based dm when the queue limitation is modified (e.g. by replacing dm's table). Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com> Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Showing 2 changed files with 84 additions and 0 deletions Side-by-side Diff
block/blk-core.c
... | ... | @@ -1530,6 +1530,87 @@ |
1530 | 1530 | EXPORT_SYMBOL(submit_bio); |
1531 | 1531 | |
1532 | 1532 | /** |
1533 | + * blk_rq_check_limits - Helper function to check a request for the queue limit | |
1534 | + * @q: the queue | |
1535 | + * @rq: the request being checked | |
1536 | + * | |
1537 | + * Description: | |
1538 | + * @rq may have been made based on weaker limitations of upper-level queues | |
1539 | + * in request stacking drivers, and it may violate the limitation of @q. | |
1540 | + * Since the block layer and the underlying device driver trust @rq | |
1541 | + * after it is inserted to @q, it should be checked against @q before | |
1542 | + * the insertion using this generic function. | |
1543 | + * | |
1544 | + * This function should also be useful for request stacking drivers | |
1545 | + * in some cases below, so export this function. | |
1546 | + * Request stacking drivers like request-based dm may change the queue | |
1547 | + * limits while requests are in the queue (e.g. dm's table swapping). | |
1548 | + * Such request stacking drivers should check those requests against | |
1549 | + * the new queue limits again when they dispatch those requests, | |
1550 | + * although such checks are also done against the old queue limits | |
1551 | + * when submitting requests. | |
1552 | + */ | |
1553 | +int blk_rq_check_limits(struct request_queue *q, struct request *rq) | |
1554 | +{ | |
1555 | + if (rq->nr_sectors > q->max_sectors || | |
1556 | + rq->data_len > q->max_hw_sectors << 9) { | |
1557 | + printk(KERN_ERR "%s: over max size limit.\n", __func__); | |
1558 | + return -EIO; | |
1559 | + } | |
1560 | + | |
1561 | + /* | |
1562 | + * queue's settings related to segment counting like q->bounce_pfn | |
1563 | + * may differ from that of other stacking queues. | |
1564 | + * Recalculate it to check the request correctly on this queue's | |
1565 | + * limitation. | |
1566 | + */ | |
1567 | + blk_recalc_rq_segments(rq); | |
1568 | + if (rq->nr_phys_segments > q->max_phys_segments || | |
1569 | + rq->nr_phys_segments > q->max_hw_segments) { | |
1570 | + printk(KERN_ERR "%s: over max segments limit.\n", __func__); | |
1571 | + return -EIO; | |
1572 | + } | |
1573 | + | |
1574 | + return 0; | |
1575 | +} | |
1576 | +EXPORT_SYMBOL_GPL(blk_rq_check_limits); | |
1577 | + | |
1578 | +/** | |
1579 | + * blk_insert_cloned_request - Helper for stacking drivers to submit a request | |
1580 | + * @q: the queue to submit the request | |
1581 | + * @rq: the request being queued | |
1582 | + */ | |
1583 | +int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | |
1584 | +{ | |
1585 | + unsigned long flags; | |
1586 | + | |
1587 | + if (blk_rq_check_limits(q, rq)) | |
1588 | + return -EIO; | |
1589 | + | |
1590 | +#ifdef CONFIG_FAIL_MAKE_REQUEST | |
1591 | + if (rq->rq_disk && rq->rq_disk->part0.make_it_fail && | |
1592 | + should_fail(&fail_make_request, blk_rq_bytes(rq))) | |
1593 | + return -EIO; | |
1594 | +#endif | |
1595 | + | |
1596 | + spin_lock_irqsave(q->queue_lock, flags); | |
1597 | + | |
1598 | + /* | |
1599 | + * The request being submitted must already be dequeued (not linked | |
1600 | + * on any queuelist), because it will be linked to @q here. | |
1601 | + */ | |
1602 | + BUG_ON(blk_queued_rq(rq)); | |
1603 | + | |
1604 | + drive_stat_acct(rq, 1); | |
1605 | + __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); | |
1606 | + | |
1607 | + spin_unlock_irqrestore(q->queue_lock, flags); | |
1608 | + | |
1609 | + return 0; | |
1610 | +} | |
1611 | +EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | |
1612 | + | |
1613 | +/** | |
1533 | 1614 | * __end_that_request_first - end I/O on a request |
1534 | 1615 | * @req: the request being processed |
1535 | 1616 | * @error: %0 for success, < %0 for error |
include/linux/blkdev.h
... | ... | @@ -693,6 +693,9 @@ |
693 | 693 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
694 | 694 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); |
695 | 695 | extern void blk_requeue_request(struct request_queue *, struct request *); |
696 | +extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); | |
697 | +extern int blk_insert_cloned_request(struct request_queue *q, | |
698 | + struct request *rq); | |
696 | 699 | extern void blk_plug_device(struct request_queue *); |
697 | 700 | extern void blk_plug_device_unlocked(struct request_queue *); |
698 | 701 | extern int blk_remove_plug(struct request_queue *); |