commit 7f7caf6aa74a4f4ad21ebe08bf23b594fce45ca7
Committer: Nicholas Bellinger
Parent:    f01b9f7339
target: Pass through I/O topology for block backstores
In addition to block size (already implemented), passing through the
alignment offset, logical-to-physical block exponent, I/O granularity,
and optimal I/O length will allow initiators to properly handle layout
on LUNs with 4K block sizes. Tested with various weird values via the
scsi_debug module.

One thing to watch with this patch is the new Block Limits values:
instead of granularity 1 / optimal 8192, LIO will now return whatever
the block device reports, which may affect performance.

Signed-off-by: Andy Grover <agrover@redhat.com>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
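For reference, the new READ CAPACITY (16) fields land in bytes 13-15 of the parameter data. Below is a minimal sketch of how an initiator would decode them per SBC-3; the struct and function names are illustrative, not from the patch:

#include <stdint.h>

/*
 * Hypothetical decode of the topology fields this commit starts
 * reporting in the READ CAPACITY (16) parameter data, per SBC-3:
 *   byte 13, bits 3:0      - logical blocks per physical block exponent
 *   bytes 14-15, bits 13:0 - lowest aligned logical block address
 */
struct readcap16_topology {
	uint32_t physical_block_size;
	uint16_t lowest_aligned_lba;
};

static void decode_readcap16_topology(const uint8_t *buf,
				      uint32_t logical_block_size,
				      struct readcap16_topology *t)
{
	uint8_t lbppbe = buf[13] & 0x0f;

	t->physical_block_size = logical_block_size << lbppbe;
	t->lowest_aligned_lba = ((buf[14] & 0x3f) << 8) | buf[15];
}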
4 files changed, 68 insertions(+), 3 deletions(-)
drivers/target/target_core_iblock.c
@@ -710,6 +710,45 @@
 	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	int ret;
+
+	ret = bdev_alignment_offset(bd);
+	if (ret == -1)
+		return 0;
+
+	/* convert offset-bytes to offset-lbas */
+	return ret / bdev_logical_block_size(bd);
+}
+
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+
+	return ilog2(logs_per_phys);
+}
+
+static unsigned int iblock_get_io_min(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+
+	return bdev_io_min(bd);
+}
+
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+
+	return bdev_io_opt(bd);
+}
+
 static struct sbc_ops iblock_sbc_ops = {
 	.execute_rw		= iblock_execute_rw,
 	.execute_sync_cache	= iblock_execute_sync_cache,
@@ -749,6 +788,10 @@
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
 	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= iblock_get_blocks,
+	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+	.get_lbppbe		= iblock_get_lbppbe,
+	.get_io_min		= iblock_get_io_min,
+	.get_io_opt		= iblock_get_io_opt,
 	.get_write_cache	= iblock_get_write_cache,
 };
 
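For a typical 512e disk (512-byte logical, 4 KiB physical blocks) the lbppbe helper above reduces to simple arithmetic. A standalone illustration with assumed values, open-coding ilog2() since the kernel helper is not available in userspace:

#include <stdio.h>

/* userspace stand-in for the kernel's ilog2() */
static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* assumed example: a 512e disk, 512B logical / 4KiB physical blocks */
	unsigned int logical = 512, physical = 4096;

	/* 4096 / 512 = 8 logical blocks per physical block, so lbppbe = 3 */
	printf("lbppbe = %u\n", ilog2_u(physical / logical));
	return 0;
}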
drivers/target/target_core_sbc.c
@@ -105,12 +105,22 @@
 	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
 	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
 	buf[11] = dev->dev_attrib.block_size & 0xff;
+
+	if (dev->transport->get_lbppbe)
+		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+	if (dev->transport->get_alignment_offset_lbas) {
+		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+		buf[14] = (lalba >> 8) & 0x3f;
+		buf[15] = lalba & 0xff;
+	}
+
 	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
 	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
-		buf[14] = 0x80;
+		buf[14] |= 0x80;
 
 	rbuf = transport_kmap_data_sg(cmd);
 	if (rbuf) {
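The switch from '=' to '|=' matters because byte 14 is now shared: its low six bits carry LALBA[13:8], written just above, and its top bit is the TPE flag. A hypothetical helper (not in the patch) showing the composition:

#include <stdbool.h>
#include <stdint.h>

/*
 * Byte 14 of the READ CAPACITY (16) payload holds both LALBA[13:8]
 * and the TPE flag; a plain assignment of 0x80 would wipe out the
 * alignment bits, hence the OR in the patch.
 */
static uint8_t readcap16_byte14(uint16_t lalba, bool tp_enabled)
{
	uint8_t b = (lalba >> 8) & 0x3f;	/* LALBA[13:8] */

	if (tp_enabled)
		b |= 0x80;			/* TPE bit, sbc3r22 */
	return b;
}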
drivers/target/target_core_spc.c
@@ -452,6 +452,7 @@
 	struct se_device *dev = cmd->se_dev;
 	u32 max_sectors;
 	int have_tp = 0;
+	int opt, min;
 
 	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@
 	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
-	put_unaligned_be16(1, &buf[6]);
+	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+	else
+		put_unaligned_be16(1, &buf[6]);
 
 	/*
	 * Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@
 	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
-	put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+	else
+		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
 
 	/*
	 * Exit now if we don't support TP.
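With these changes the Block Limits VPD page reports the backing device's byte-granularity I/O hints converted into logical blocks. A worked example with assumed values (io_min 4096, io_opt 1 MiB, 512-byte blocks):

#include <stdio.h>

int main(void)
{
	/* assumed: a RAID stripe behind a 512-byte logical block size */
	unsigned int block_size = 512;
	unsigned int io_min = 4096;	/* bytes */
	unsigned int io_opt = 1048576;	/* bytes */

	/* OPTIMAL TRANSFER LENGTH GRANULARITY: 4096 / 512 = 8 blocks */
	printf("granularity = %u blocks\n", io_min / block_size);
	/* OPTIMAL TRANSFER LENGTH: 1048576 / 512 = 2048 blocks */
	printf("optimal     = %u blocks\n", io_opt / block_size);
	return 0;
}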
include/target/target_core_backend.h
@@ -34,6 +34,11 @@
 	sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
 	u32 (*get_device_type)(struct se_device *);
 	sector_t (*get_blocks)(struct se_device *);
+	sector_t (*get_alignment_offset_lbas)(struct se_device *);
+	/* lbppbe = logical blocks per physical block exponent. see SBC-3 */
+	unsigned int (*get_lbppbe)(struct se_device *);
+	unsigned int (*get_io_min)(struct se_device *);
+	unsigned int (*get_io_opt)(struct se_device *);
 	unsigned char *(*get_sense_buffer)(struct se_cmd *);
 	bool (*get_write_cache)(struct se_device *);
 };