Commit e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1

Authored by Martin K. Petersen
Committed by Jens Axboe
1 parent 9bd7de51ee

block: Do away with the notion of hardsect_size

Until now we have had a 1:1 mapping between storage device physical
block size and the logical block size used when addressing the device.
With SATA 4KB drives coming out that will no longer be the case.  The
sector size will be 4KB but the logical block size will remain
512-bytes.  Hence we need to distinguish between the physical block size
and the logical ditto.

This patch renames hardsect_size to logical_block_size.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Showing 54 changed files with 108 additions and 98 deletions Side-by-side Diff

arch/powerpc/sysdev/axonram.c
... ... @@ -250,7 +250,7 @@
250 250  
251 251 set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
252 252 blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
253   - blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
  253 + blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
254 254 add_disk(bank->disk);
255 255  
256 256 bank->irq_id = irq_of_parse_and_map(device->node, 0);
block/blk-integrity.c
... ... @@ -340,7 +340,7 @@
340 340 kobject_uevent(&bi->kobj, KOBJ_ADD);
341 341  
342 342 bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
343   - bi->sector_size = disk->queue->hardsect_size;
  343 + bi->sector_size = queue_logical_block_size(disk->queue);
344 344 disk->integrity = bi;
345 345 } else
346 346 bi = disk->integrity;
block/blk-settings.c
... ... @@ -134,7 +134,7 @@
134 134 q->backing_dev_info.state = 0;
135 135 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
136 136 blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
137   - blk_queue_hardsect_size(q, 512);
  137 + blk_queue_logical_block_size(q, 512);
138 138 blk_queue_dma_alignment(q, 511);
139 139 blk_queue_congestion_threshold(q);
140 140 q->nr_batching = BLK_BATCH_REQ;
141 141  
142 142  
143 143  
144 144  
145 145  
... ... @@ -288,21 +288,20 @@
288 288 EXPORT_SYMBOL(blk_queue_max_segment_size);
289 289  
290 290 /**
291   - * blk_queue_hardsect_size - set hardware sector size for the queue
  291 + * blk_queue_logical_block_size - set logical block size for the queue
292 292 * @q: the request queue for the device
293   - * @size: the hardware sector size, in bytes
  293 + * @size: the logical block size, in bytes
294 294 *
295 295 * Description:
296   - * This should typically be set to the lowest possible sector size
297   - * that the hardware can operate on (possible without reverting to
298   - * even internal read-modify-write operations). Usually the default
299   - * of 512 covers most hardware.
  296 + * This should be set to the lowest possible block size that the
  297 + * storage device can address. The default of 512 covers most
  298 + * hardware.
300 299 **/
301   -void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
  300 +void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
302 301 {
303   - q->hardsect_size = size;
  302 + q->logical_block_size = size;
304 303 }
305   -EXPORT_SYMBOL(blk_queue_hardsect_size);
  304 +EXPORT_SYMBOL(blk_queue_logical_block_size);
306 305  
307 306 /*
308 307 * Returns the minimum that is _not_ zero, unless both are zero.
... ... @@ -324,7 +323,7 @@
324 323 t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
325 324 t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
326 325 t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
327   - t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
  326 + t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
328 327 if (!t->queue_lock)
329 328 WARN_ON_ONCE(1);
330 329 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
... ... @@ -100,9 +100,9 @@
100 100 return queue_var_show(max_sectors_kb, (page));
101 101 }
102 102  
103   -static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
  103 +static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
104 104 {
105   - return queue_var_show(q->hardsect_size, page);
  105 + return queue_var_show(queue_logical_block_size(q), page);
106 106 }
107 107  
108 108 static ssize_t
109 109  
... ... @@ -249,9 +249,14 @@
249 249  
250 250 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
251 251 .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
252   - .show = queue_hw_sector_size_show,
  252 + .show = queue_logical_block_size_show,
253 253 };
254 254  
  255 +static struct queue_sysfs_entry queue_logical_block_size_entry = {
  256 + .attr = {.name = "logical_block_size", .mode = S_IRUGO },
  257 + .show = queue_logical_block_size_show,
  258 +};
  259 +
255 260 static struct queue_sysfs_entry queue_nonrot_entry = {
256 261 .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
257 262 .show = queue_nonrot_show,
... ... @@ -283,6 +288,7 @@
283 288 &queue_max_sectors_entry.attr,
284 289 &queue_iosched_entry.attr,
285 290 &queue_hw_sector_size_entry.attr,
  291 + &queue_logical_block_size_entry.attr,
286 292 &queue_nonrot_entry.attr,
287 293 &queue_nomerges_entry.attr,
288 294 &queue_rq_affinity_entry.attr,
block/compat_ioctl.c
... ... @@ -763,7 +763,7 @@
763 763 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
764 764 return compat_put_int(arg, block_size(bdev));
765 765 case BLKSSZGET: /* get block device hardware sector size */
766   - return compat_put_int(arg, bdev_hardsect_size(bdev));
  766 + return compat_put_int(arg, bdev_logical_block_size(bdev));
767 767 case BLKSECTGET:
768 768 return compat_put_ushort(arg,
769 769 bdev_get_queue(bdev)->max_sectors);
... ... @@ -311,7 +311,7 @@
311 311 case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
312 312 return put_int(arg, block_size(bdev));
313 313 case BLKSSZGET: /* get block device hardware sector size */
314   - return put_int(arg, bdev_hardsect_size(bdev));
  314 + return put_int(arg, bdev_logical_block_size(bdev));
315 315 case BLKSECTGET:
316 316 return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
317 317 case BLKRASET:
drivers/block/cciss.c
... ... @@ -1389,8 +1389,8 @@
1389 1389  
1390 1390 disk->queue->queuedata = h;
1391 1391  
1392   - blk_queue_hardsect_size(disk->queue,
1393   - h->drv[drv_index].block_size);
  1392 + blk_queue_logical_block_size(disk->queue,
  1393 + h->drv[drv_index].block_size);
1394 1394  
1395 1395 /* Make sure all queue data is written out before */
1396 1396 /* setting h->drv[drv_index].queue, as setting this */
... ... @@ -2298,7 +2298,7 @@
2298 2298 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2299 2299 inq_buff, drv);
2300 2300  
2301   - blk_queue_hardsect_size(drv->queue, drv->block_size);
  2301 + blk_queue_logical_block_size(drv->queue, drv->block_size);
2302 2302 set_capacity(disk, drv->nr_blocks);
2303 2303  
2304 2304 kfree(inq_buff);
drivers/block/cpqarray.c
... ... @@ -474,7 +474,7 @@
474 474 disk->fops = &ida_fops;
475 475 if (j && !drv->nr_blks)
476 476 continue;
477   - blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
  477 + blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
478 478 set_capacity(disk, drv->nr_blks);
479 479 disk->queue = hba[i]->queue;
480 480 disk->private_data = drv;
... ... @@ -1546,7 +1546,7 @@
1546 1546 drv_info_t *drv = &host->drv[i];
1547 1547 if (i && !drv->nr_blks)
1548 1548 continue;
1549   - blk_queue_hardsect_size(host->queue, drv->blk_size);
  1549 + blk_queue_logical_block_size(host->queue, drv->blk_size);
1550 1550 set_capacity(disk, drv->nr_blks);
1551 1551 disk->queue = host->queue;
1552 1552 disk->private_data = drv;
... ... @@ -724,7 +724,7 @@
724 724 blk_queue_max_sectors(hd_queue, 255);
725 725 init_timer(&device_timer);
726 726 device_timer.function = hd_times_out;
727   - blk_queue_hardsect_size(hd_queue, 512);
  727 + blk_queue_logical_block_size(hd_queue, 512);
728 728  
729 729 if (!NR_HD) {
730 730 /*
drivers/block/mg_disk.c
... ... @@ -996,7 +996,7 @@
996 996 goto probe_err_6;
997 997 }
998 998 blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
999   - blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
  999 + blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
1000 1000  
1001 1001 init_timer(&host->timer);
1002 1002 host->timer.function = mg_times_out;
drivers/block/pktcdvd.c
... ... @@ -2657,7 +2657,7 @@
2657 2657 struct request_queue *q = pd->disk->queue;
2658 2658  
2659 2659 blk_queue_make_request(q, pkt_make_request);
2660   - blk_queue_hardsect_size(q, CD_FRAMESIZE);
  2660 + blk_queue_logical_block_size(q, CD_FRAMESIZE);
2661 2661 blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
2662 2662 blk_queue_merge_bvec(q, pkt_merge_bvec);
2663 2663 q->queuedata = pd;
drivers/block/ps3disk.c
... ... @@ -477,7 +477,7 @@
477 477 blk_queue_max_sectors(queue, dev->bounce_size >> 9);
478 478 blk_queue_segment_boundary(queue, -1UL);
479 479 blk_queue_dma_alignment(queue, dev->blk_size-1);
480   - blk_queue_hardsect_size(queue, dev->blk_size);
  480 + blk_queue_logical_block_size(queue, dev->blk_size);
481 481  
482 482 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
483 483 ps3disk_prepare_flush);
... ... @@ -722,7 +722,7 @@
722 722 /*
723 723 * build the command
724 724 *
725   - * The call to blk_queue_hardsect_size() guarantees that request
  725 + * The call to blk_queue_logical_block_size() guarantees that request
726 726 * is aligned, but it is given in terms of 512 byte units, always.
727 727 */
728 728 block = blk_rq_pos(rq) >> lun->capacity.bshift;
... ... @@ -1749,7 +1749,7 @@
1749 1749 ub_revalidate(lun->udev, lun);
1750 1750  
1751 1751 /* XXX Support sector size switching like in sr.c */
1752   - blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
  1752 + blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
1753 1753 set_capacity(disk, lun->capacity.nsec);
1754 1754 // set_disk_ro(sdkp->disk, lun->readonly);
1755 1755  
... ... @@ -2324,7 +2324,7 @@
2324 2324 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2325 2325 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
2326 2326 blk_queue_max_sectors(q, UB_MAX_SECTORS);
2327   - blk_queue_hardsect_size(q, lun->capacity.bsize);
  2327 + blk_queue_logical_block_size(q, lun->capacity.bsize);
2328 2328  
2329 2329 lun->disk = disk;
2330 2330 q->queuedata = lun;
drivers/block/virtio_blk.c
... ... @@ -347,7 +347,7 @@
347 347 offsetof(struct virtio_blk_config, blk_size),
348 348 &blk_size);
349 349 if (!err)
350   - blk_queue_hardsect_size(vblk->disk->queue, blk_size);
  350 + blk_queue_logical_block_size(vblk->disk->queue, blk_size);
351 351  
352 352 add_disk(vblk->disk);
353 353 return 0;
drivers/block/xen-blkfront.c
... ... @@ -344,7 +344,7 @@
344 344 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
345 345  
346 346 /* Hard sector size and max sectors impersonate the equiv. hardware. */
347   - blk_queue_hardsect_size(rq, sector_size);
  347 + blk_queue_logical_block_size(rq, sector_size);
348 348 blk_queue_max_sectors(rq, 512);
349 349  
350 350 /* Each segment in a request is up to an aligned page in size. */
drivers/block/xsysace.c
... ... @@ -984,7 +984,7 @@
984 984 ace->queue = blk_init_queue(ace_request, &ace->lock);
985 985 if (ace->queue == NULL)
986 986 goto err_blk_initq;
987   - blk_queue_hardsect_size(ace->queue, 512);
  987 + blk_queue_logical_block_size(ace->queue, 512);
988 988  
989 989 /*
990 990 * Allocate and initialize GD structure
drivers/cdrom/gdrom.c
... ... @@ -739,7 +739,7 @@
739 739  
740 740 static int __devinit probe_gdrom_setupqueue(void)
741 741 {
742   - blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
  742 + blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
743 743 /* using DMA so memory will need to be contiguous */
744 744 blk_queue_max_hw_segments(gd.gdrom_rq, 1);
745 745 /* set a large max size to get most from DMA */
drivers/cdrom/viocd.c
... ... @@ -469,8 +469,8 @@
469 469 case viocdopen:
470 470 if (event->xRc == 0) {
471 471 di = &viocd_diskinfo[bevent->disk];
472   - blk_queue_hardsect_size(di->viocd_disk->queue,
473   - bevent->block_size);
  472 + blk_queue_logical_block_size(di->viocd_disk->queue,
  473 + bevent->block_size);
474 474 set_capacity(di->viocd_disk,
475 475 bevent->media_size *
476 476 bevent->block_size / 512);
... ... @@ -71,7 +71,7 @@
71 71 err = bd_claim(bdev, raw_open);
72 72 if (err)
73 73 goto out1;
74   - err = set_blocksize(bdev, bdev_hardsect_size(bdev));
  74 + err = set_blocksize(bdev, bdev_logical_block_size(bdev));
75 75 if (err)
76 76 goto out2;
77 77 filp->f_flags |= O_DIRECT;
drivers/ide/ide-cd.c
... ... @@ -182,7 +182,7 @@
182 182 (sense->information[2] << 8) |
183 183 (sense->information[3]);
184 184  
185   - if (drive->queue->hardsect_size == 2048)
  185 + if (queue_logical_block_size(drive->queue) == 2048)
186 186 /* device sector size is 2K */
187 187 sector <<= 2;
188 188  
... ... @@ -737,7 +737,7 @@
737 737 struct request_queue *q = drive->queue;
738 738 int write = rq_data_dir(rq) == WRITE;
739 739 unsigned short sectors_per_frame =
740   - queue_hardsect_size(q) >> SECTOR_BITS;
  740 + queue_logical_block_size(q) >> SECTOR_BITS;
741 741  
742 742 ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
743 743 "secs_per_frame: %u",
... ... @@ -1021,8 +1021,8 @@
1021 1021 /* save a private copy of the TOC capacity for error handling */
1022 1022 drive->probed_capacity = toc->capacity * sectors_per_frame;
1023 1023  
1024   - blk_queue_hardsect_size(drive->queue,
1025   - sectors_per_frame << SECTOR_BITS);
  1024 + blk_queue_logical_block_size(drive->queue,
  1025 + sectors_per_frame << SECTOR_BITS);
1026 1026  
1027 1027 /* first read just the header, so we know how long the TOC is */
1028 1028 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
... ... @@ -1338,7 +1338,7 @@
1338 1338 /* standard prep_rq_fn that builds 10 byte cmds */
1339 1339 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1340 1340 {
1341   - int hard_sect = queue_hardsect_size(q);
  1341 + int hard_sect = queue_logical_block_size(q);
1342 1342 long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
1343 1343 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
1344 1344  
... ... @@ -1543,7 +1543,7 @@
1543 1543  
1544 1544 nslots = ide_cdrom_probe_capabilities(drive);
1545 1545  
1546   - blk_queue_hardsect_size(q, CD_FRAMESIZE);
  1546 + blk_queue_logical_block_size(q, CD_FRAMESIZE);
1547 1547  
1548 1548 if (ide_cdrom_register(drive, nslots)) {
1549 1549 printk(KERN_ERR PFX "%s: %s failed to register device with the"
... ... @@ -232,7 +232,7 @@
232 232 target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
233 233  
234 234 if (sync_page_io(rdev->bdev, target,
235   - roundup(size, bdev_hardsect_size(rdev->bdev)),
  235 + roundup(size, bdev_logical_block_size(rdev->bdev)),
236 236 page, READ)) {
237 237 page->index = index;
238 238 attach_page_buffers(page, NULL); /* so that free_buffer will
... ... @@ -287,7 +287,7 @@
287 287 int size = PAGE_SIZE;
288 288 if (page->index == bitmap->file_pages-1)
289 289 size = roundup(bitmap->last_page_size,
290   - bdev_hardsect_size(rdev->bdev));
  290 + bdev_logical_block_size(rdev->bdev));
291 291 /* Just make sure we aren't corrupting data or
292 292 * metadata
293 293 */
drivers/md/dm-exception-store.c
... ... @@ -178,7 +178,7 @@
178 178 }
179 179  
180 180 /* Validate the chunk size against the device block size */
181   - if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
  181 + if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
182 182 *error = "Chunk size is not a multiple of device blocksize";
183 183 return -EINVAL;
184 184 }
... ... @@ -413,7 +413,8 @@
413 413 * Buffer holds both header and bitset.
414 414 */
415 415 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
416   - bitset_size, ti->limits.hardsect_size);
  416 + bitset_size,
  417 + ti->limits.logical_block_size);
417 418  
418 419 if (buf_size > dev->bdev->bd_inode->i_size) {
419 420 DMWARN("log device %s too small: need %llu bytes",
drivers/md/dm-snap-persistent.c
... ... @@ -282,7 +282,7 @@
282 282 */
283 283 if (!ps->store->chunk_size) {
284 284 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
285   - bdev_hardsect_size(ps->store->cow->bdev) >> 9);
  285 + bdev_logical_block_size(ps->store->cow->bdev) >> 9);
286 286 ps->store->chunk_mask = ps->store->chunk_size - 1;
287 287 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
288 288 chunk_size_supplied = 0;
drivers/md/dm-table.c
... ... @@ -108,7 +108,8 @@
108 108 lhs->max_hw_segments =
109 109 min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
110 110  
111   - lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
  111 + lhs->logical_block_size = max(lhs->logical_block_size,
  112 + rhs->logical_block_size);
112 113  
113 114 lhs->max_segment_size =
114 115 min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
... ... @@ -529,7 +530,8 @@
529 530 rs->max_hw_segments =
530 531 min_not_zero(rs->max_hw_segments, q->max_hw_segments);
531 532  
532   - rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
  533 + rs->logical_block_size = max(rs->logical_block_size,
  534 + queue_logical_block_size(q));
533 535  
534 536 rs->max_segment_size =
535 537 min_not_zero(rs->max_segment_size, q->max_segment_size);
... ... @@ -683,8 +685,8 @@
683 685 rs->max_phys_segments = MAX_PHYS_SEGMENTS;
684 686 if (!rs->max_hw_segments)
685 687 rs->max_hw_segments = MAX_HW_SEGMENTS;
686   - if (!rs->hardsect_size)
687   - rs->hardsect_size = 1 << SECTOR_SHIFT;
  688 + if (!rs->logical_block_size)
  689 + rs->logical_block_size = 1 << SECTOR_SHIFT;
688 690 if (!rs->max_segment_size)
689 691 rs->max_segment_size = MAX_SEGMENT_SIZE;
690 692 if (!rs->seg_boundary_mask)
... ... @@ -914,7 +916,7 @@
914 916 blk_queue_max_sectors(q, t->limits.max_sectors);
915 917 q->max_phys_segments = t->limits.max_phys_segments;
916 918 q->max_hw_segments = t->limits.max_hw_segments;
917   - q->hardsect_size = t->limits.hardsect_size;
  919 + q->logical_block_size = t->limits.logical_block_size;
918 920 q->max_segment_size = t->limits.max_segment_size;
919 921 q->max_hw_sectors = t->limits.max_hw_sectors;
920 922 q->seg_boundary_mask = t->limits.seg_boundary_mask;
... ... @@ -1202,7 +1202,7 @@
1202 1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1203 1203  
1204 1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1205   - bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
  1205 + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1206 1206 if (rdev->sb_size & bmask)
1207 1207 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1208 1208  
drivers/memstick/core/mspro_block.c
... ... @@ -1242,7 +1242,7 @@
1242 1242  
1243 1243 sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
1244 1244  
1245   - blk_queue_hardsect_size(msb->queue, msb->page_size);
  1245 + blk_queue_logical_block_size(msb->queue, msb->page_size);
1246 1246  
1247 1247 capacity = be16_to_cpu(sys_info->user_block_count);
1248 1248 capacity *= be16_to_cpu(sys_info->block_size);
drivers/message/i2o/i2o_block.c
... ... @@ -794,8 +794,9 @@
794 794 if (c->adaptec) {
795 795 u8 cmd[10];
796 796 u32 scsi_flags;
797   - u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
  797 + u16 hwsec;
798 798  
  799 + hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
799 800 memset(cmd, 0, 10);
800 801  
801 802 sgl_offset = SGL_OFFSET_12;
... ... @@ -1078,7 +1079,7 @@
1078 1079 */
1079 1080 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1080 1081 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1081   - blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
  1082 + blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
1082 1083 } else
1083 1084 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1084 1085  
drivers/mmc/card/block.c
... ... @@ -521,7 +521,7 @@
521 521  
522 522 sprintf(md->disk->disk_name, "mmcblk%d", devidx);
523 523  
524   - blk_queue_hardsect_size(md->queue.queue, 512);
  524 + blk_queue_logical_block_size(md->queue.queue, 512);
525 525  
526 526 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
527 527 /*
drivers/mtd/mtd_blkdevs.c
... ... @@ -378,7 +378,7 @@
378 378 }
379 379  
380 380 tr->blkcore_priv->rq->queuedata = tr;
381   - blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
  381 + blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
382 382 if (tr->discard)
383 383 blk_queue_set_discard(tr->blkcore_priv->rq,
384 384 blktrans_discard_request);
drivers/s390/block/dasd.c
... ... @@ -1990,7 +1990,7 @@
1990 1990 {
1991 1991 int max;
1992 1992  
1993   - blk_queue_hardsect_size(block->request_queue, block->bp_block);
  1993 + blk_queue_logical_block_size(block->request_queue, block->bp_block);
1994 1994 max = block->base->discipline->max_blocks << block->s2b_shift;
1995 1995 blk_queue_max_sectors(block->request_queue, max);
1996 1996 blk_queue_max_phys_segments(block->request_queue, -1L);
drivers/s390/block/dcssblk.c
... ... @@ -602,7 +602,7 @@
602 602 dev_info->gd->private_data = dev_info;
603 603 dev_info->gd->driverfs_dev = &dev_info->dev;
604 604 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
605   - blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
  605 + blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
606 606  
607 607 seg_byte_size = (dev_info->end - dev_info->start + 1);
608 608 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
drivers/s390/block/xpram.c
... ... @@ -343,7 +343,7 @@
343 343 goto out;
344 344 }
345 345 blk_queue_make_request(xpram_queues[i], xpram_make_request);
346   - blk_queue_hardsect_size(xpram_queues[i], 4096);
  346 + blk_queue_logical_block_size(xpram_queues[i], 4096);
347 347 }
348 348  
349 349 /*
drivers/s390/char/tape_block.c
... ... @@ -222,7 +222,7 @@
222 222 if (rc)
223 223 goto cleanup_queue;
224 224  
225   - blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
  225 + blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
226 226 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
227 227 blk_queue_max_phys_segments(blkdat->request_queue, -1L);
228 228 blk_queue_max_hw_segments(blkdat->request_queue, -1L);
... ... @@ -1510,7 +1510,7 @@
1510 1510 */
1511 1511 sector_size = 512;
1512 1512 }
1513   - blk_queue_hardsect_size(sdp->request_queue, sector_size);
  1513 + blk_queue_logical_block_size(sdp->request_queue, sector_size);
1514 1514  
1515 1515 {
1516 1516 char cap_str_2[10], cap_str_10[10];
... ... @@ -727,7 +727,7 @@
727 727 }
728 728  
729 729 queue = cd->device->request_queue;
730   - blk_queue_hardsect_size(queue, sector_size);
  730 + blk_queue_logical_block_size(queue, sector_size);
731 731  
732 732 return;
733 733 }
... ... @@ -1490,11 +1490,12 @@
1490 1490 sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1491 1491 unsigned int offset)
1492 1492 {
1493   - unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
  1493 + unsigned int sector_sz;
1494 1494 struct bio_vec *bv;
1495 1495 sector_t sectors;
1496 1496 int i;
1497 1497  
  1498 + sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
1498 1499 sectors = 0;
1499 1500  
1500 1501 if (index >= bio->bi_idx)
... ... @@ -76,7 +76,7 @@
76 76 return -EINVAL;
77 77  
78 78 /* Size cannot be smaller than the size supported by the device */
79   - if (size < bdev_hardsect_size(bdev))
  79 + if (size < bdev_logical_block_size(bdev))
80 80 return -EINVAL;
81 81  
82 82 /* Don't change the size if it is same as current */
... ... @@ -106,7 +106,7 @@
106 106  
107 107 int sb_min_blocksize(struct super_block *sb, int size)
108 108 {
109   - int minsize = bdev_hardsect_size(sb->s_bdev);
  109 + int minsize = bdev_logical_block_size(sb->s_bdev);
110 110 if (size < minsize)
111 111 size = minsize;
112 112 return sb_set_blocksize(sb, size);
... ... @@ -1117,7 +1117,7 @@
1117 1117  
1118 1118 void bd_set_size(struct block_device *bdev, loff_t size)
1119 1119 {
1120   - unsigned bsize = bdev_hardsect_size(bdev);
  1120 + unsigned bsize = bdev_logical_block_size(bdev);
1121 1121  
1122 1122 bdev->bd_inode->i_size = size;
1123 1123 while (bsize < PAGE_CACHE_SIZE) {
... ... @@ -1085,12 +1085,12 @@
1085 1085 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1086 1086 {
1087 1087 /* Size must be multiple of hard sectorsize */
1088   - if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
  1088 + if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1089 1089 (size < 512 || size > PAGE_SIZE))) {
1090 1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1091 1091 size);
1092   - printk(KERN_ERR "hardsect size: %d\n",
1093   - bdev_hardsect_size(bdev));
  1092 + printk(KERN_ERR "logical block size: %d\n",
  1093 + bdev_logical_block_size(bdev));
1094 1094  
1095 1095 dump_stack();
1096 1096 return NULL;
... ... @@ -1127,7 +1127,7 @@
1127 1127 rw = WRITE_ODIRECT;
1128 1128  
1129 1129 if (bdev)
1130   - bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
  1130 + bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
1131 1131  
1132 1132 if (offset & blocksize_mask) {
1133 1133 if (bdev)
... ... @@ -1696,7 +1696,7 @@
1696 1696 goto failed_mount;
1697 1697 }
1698 1698  
1699   - hblock = bdev_hardsect_size(sb->s_bdev);
  1699 + hblock = bdev_logical_block_size(sb->s_bdev);
1700 1700 if (sb->s_blocksize != blocksize) {
1701 1701 /*
1702 1702 * Make sure the blocksize for the filesystem is larger
... ... @@ -2119,7 +2119,7 @@
2119 2119 }
2120 2120  
2121 2121 blocksize = sb->s_blocksize;
2122   - hblock = bdev_hardsect_size(bdev);
  2122 + hblock = bdev_logical_block_size(bdev);
2123 2123 if (blocksize < hblock) {
2124 2124 printk(KERN_ERR
2125 2125 "EXT3-fs: blocksize too small for journal device.\n");
... ... @@ -2962,7 +2962,7 @@
2962 2962 }
2963 2963  
2964 2964 blocksize = sb->s_blocksize;
2965   - hblock = bdev_hardsect_size(bdev);
  2965 + hblock = bdev_logical_block_size(bdev);
2966 2966 if (blocksize < hblock) {
2967 2967 printk(KERN_ERR
2968 2968 "EXT4-fs: blocksize too small for journal device.\n");
fs/gfs2/ops_fstype.c
... ... @@ -526,11 +526,11 @@
526 526 }
527 527  
528 528 /* Set up the buffer cache and SB for real */
529   - if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) {
  529 + if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
530 530 ret = -EINVAL;
531 531 fs_err(sdp, "FS block size (%u) is too small for device "
532 532 "block size (%u)\n",
533   - sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev));
  533 + sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
534 534 goto out;
535 535 }
536 536 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
... ... @@ -845,7 +845,7 @@
845 845 struct super_block *sb = sdp->sd_vfs;
846 846 struct block_device *bdev = sb->s_bdev;
847 847 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
848   - bdev_hardsect_size(sb->s_bdev);
  848 + bdev_logical_block_size(sb->s_bdev);
849 849 u64 blk;
850 850 sector_t start = 0;
851 851 sector_t nr_sects = 0;
fs/nilfs2/the_nilfs.c
... ... @@ -515,7 +515,7 @@
515 515  
516 516 blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
517 517 if (sb->s_blocksize != blocksize) {
518   - int hw_blocksize = bdev_hardsect_size(sb->s_bdev);
  518 + int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
519 519  
520 520 if (blocksize < hw_blocksize) {
521 521 printk(KERN_ERR
... ... @@ -25,7 +25,7 @@
25 25 #include <linux/slab.h>
26 26 #include <linux/string.h>
27 27 #include <linux/spinlock.h>
28   -#include <linux/blkdev.h> /* For bdev_hardsect_size(). */
  28 +#include <linux/blkdev.h> /* For bdev_logical_block_size(). */
29 29 #include <linux/backing-dev.h>
30 30 #include <linux/buffer_head.h>
31 31 #include <linux/vfs.h>
32 32  
... ... @@ -2785,13 +2785,13 @@
2785 2785 goto err_out_now;
2786 2786  
2787 2787 /* We support sector sizes up to the PAGE_CACHE_SIZE. */
2788   - if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
  2788 + if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
2789 2789 if (!silent)
2790 2790 ntfs_error(sb, "Device has unsupported sector size "
2791 2791 "(%i). The maximum supported sector "
2792 2792 "size on this architecture is %lu "
2793 2793 "bytes.",
2794   - bdev_hardsect_size(sb->s_bdev),
  2794 + bdev_logical_block_size(sb->s_bdev),
2795 2795 PAGE_CACHE_SIZE);
2796 2796 goto err_out_now;
2797 2797 }
fs/ocfs2/cluster/heartbeat.c
... ... @@ -1371,7 +1371,7 @@
1371 1371  
1372 1372 bdevname(reg->hr_bdev, reg->hr_dev_name);
1373 1373  
1374   - sectsize = bdev_hardsect_size(reg->hr_bdev);
  1374 + sectsize = bdev_logical_block_size(reg->hr_bdev);
1375 1375 if (sectsize != reg->hr_block_bytes) {
1376 1376 mlog(ML_ERROR,
1377 1377 "blocksize %u incorrect for device, expected %d",
... ... @@ -713,7 +713,7 @@
713 713 *bh = NULL;
714 714  
715 715 /* may be > 512 */
716   - *sector_size = bdev_hardsect_size(sb->s_bdev);
  716 + *sector_size = bdev_logical_block_size(sb->s_bdev);
717 717 if (*sector_size > OCFS2_MAX_BLOCKSIZE) {
718 718 mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",
719 719 *sector_size, OCFS2_MAX_BLOCKSIZE);
... ... @@ -76,7 +76,7 @@
76 76 Sector sect;
77 77  
78 78 res = 0;
79   - blocksize = bdev_hardsect_size(bdev);
  79 + blocksize = bdev_logical_block_size(bdev);
80 80 if (blocksize <= 0)
81 81 goto out_exit;
82 82 i_size = i_size_read(bdev->bd_inode);
fs/partitions/msdos.c
... ... @@ -110,7 +110,7 @@
110 110 Sector sect;
111 111 unsigned char *data;
112 112 u32 this_sector, this_size;
113   - int sector_size = bdev_hardsect_size(bdev) / 512;
  113 + int sector_size = bdev_logical_block_size(bdev) / 512;
114 114 int loopct = 0; /* number of links followed
115 115 without finding a data partition */
116 116 int i;
... ... @@ -415,7 +415,7 @@
415 415  
416 416 int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
417 417 {
418   - int sector_size = bdev_hardsect_size(bdev) / 512;
  418 + int sector_size = bdev_logical_block_size(bdev) / 512;
419 419 Sector sect;
420 420 unsigned char *data;
421 421 struct partition *p;
... ... @@ -1915,7 +1915,7 @@
1915 1915 if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
1916 1916 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
1917 1917 } else {
1918   - uopt.blocksize = bdev_hardsect_size(sb->s_bdev);
  1918 + uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
1919 1919 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
1920 1920 if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
1921 1921 if (!silent)
fs/xfs/linux-2.6/xfs_buf.c
... ... @@ -1501,7 +1501,7 @@
1501 1501 struct block_device *bdev)
1502 1502 {
1503 1503 return xfs_setsize_buftarg_flags(btp,
1504   - PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
  1504 + PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
1505 1505 }
1506 1506  
1507 1507 int
include/linux/blkdev.h
... ... @@ -391,7 +391,7 @@
391 391 unsigned int max_hw_sectors;
392 392 unsigned short max_phys_segments;
393 393 unsigned short max_hw_segments;
394   - unsigned short hardsect_size;
  394 + unsigned short logical_block_size;
395 395 unsigned int max_segment_size;
396 396  
397 397 unsigned long seg_boundary_mask;
... ... @@ -901,7 +901,7 @@
901 901 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
902 902 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
903 903 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
904   -extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
  904 +extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
905 905 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
906 906 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
907 907 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
908 908  
909 909  
910 910  
... ... @@ -988,19 +988,19 @@
988 988  
989 989 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
990 990  
991   -static inline int queue_hardsect_size(struct request_queue *q)
  991 +static inline unsigned short queue_logical_block_size(struct request_queue *q)
992 992 {
993 993 int retval = 512;
994 994  
995   - if (q && q->hardsect_size)
996   - retval = q->hardsect_size;
  995 + if (q && q->logical_block_size)
  996 + retval = q->logical_block_size;
997 997  
998 998 return retval;
999 999 }
1000 1000  
1001   -static inline int bdev_hardsect_size(struct block_device *bdev)
  1001 +static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
1002 1002 {
1003   - return queue_hardsect_size(bdev_get_queue(bdev));
  1003 + return queue_logical_block_size(bdev_get_queue(bdev));
1004 1004 }
1005 1005  
1006 1006 static inline int queue_dma_alignment(struct request_queue *q)
include/linux/device-mapper.h
... ... @@ -149,7 +149,7 @@
149 149 unsigned max_hw_sectors;
150 150 unsigned max_sectors;
151 151 unsigned max_segment_size;
152   - unsigned short hardsect_size;
  152 + unsigned short logical_block_size;
153 153 unsigned short max_hw_segments;
154 154 unsigned short max_phys_segments;
155 155 unsigned char no_cluster; /* inverted so that 0 is default */