Commit 8f6c2e4b325a8e9f8f47febb2fd0ed4fae7d45a9
Committed by
NeilBrown
1 parent
5a4f13fad1
Exists in
master
and in
7 other branches
md: Use new topology calls to indicate alignment and I/O sizes
Switch MD over to the new disk_stack_limits() function which checks for alignment and adjusts preferred I/O sizes when stacking. Also indicate preferred I/O sizes where applicable. Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: NeilBrown <neilb@suse.de>
Showing 6 changed files with 39 additions and 19 deletions Side-by-side Diff
drivers/md/linear.c
... | ... | @@ -166,8 +166,8 @@ |
166 | 166 | rdev->sectors = sectors * mddev->chunk_sectors; |
167 | 167 | } |
168 | 168 | |
169 | - blk_queue_stack_limits(mddev->queue, | |
170 | - rdev->bdev->bd_disk->queue); | |
169 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
170 | + rdev->data_offset << 9); | |
171 | 171 | /* as we don't honour merge_bvec_fn, we must never risk |
172 | 172 | * violating it, so limit ->max_sector to one PAGE, as |
173 | 173 | * a one page request is never in violation. |
drivers/md/multipath.c
... | ... | @@ -294,7 +294,8 @@ |
294 | 294 | for (path = first; path <= last; path++) |
295 | 295 | if ((p=conf->multipaths+path)->rdev == NULL) { |
296 | 296 | q = rdev->bdev->bd_disk->queue; |
297 | - blk_queue_stack_limits(mddev->queue, q); | |
297 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
298 | + rdev->data_offset << 9); | |
298 | 299 | |
299 | 300 | /* as we don't honour merge_bvec_fn, we must never risk |
300 | 301 | * violating it, so limit ->max_sector to one PAGE, as |
301 | 302 | |
... | ... | @@ -463,9 +464,9 @@ |
463 | 464 | |
464 | 465 | disk = conf->multipaths + disk_idx; |
465 | 466 | disk->rdev = rdev; |
467 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
468 | + rdev->data_offset << 9); | |
466 | 469 | |
467 | - blk_queue_stack_limits(mddev->queue, | |
468 | - rdev->bdev->bd_disk->queue); | |
469 | 470 | /* as we don't honour merge_bvec_fn, we must never risk |
470 | 471 | * violating it, not that we ever expect a device with |
471 | 472 | * a merge_bvec_fn to be involved in multipath */ |
drivers/md/raid0.c
... | ... | @@ -170,8 +170,8 @@ |
170 | 170 | } |
171 | 171 | dev[j] = rdev1; |
172 | 172 | |
173 | - blk_queue_stack_limits(mddev->queue, | |
174 | - rdev1->bdev->bd_disk->queue); | |
173 | + disk_stack_limits(mddev->gendisk, rdev1->bdev, | |
174 | + rdev1->data_offset << 9); | |
175 | 175 | /* as we don't honour merge_bvec_fn, we must never risk |
176 | 176 | * violating it, so limit ->max_sector to one PAGE, as |
177 | 177 | * a one page request is never in violation. |
... | ... | @@ -250,6 +250,11 @@ |
250 | 250 | mddev->chunk_sectors << 9); |
251 | 251 | goto abort; |
252 | 252 | } |
253 | + | |
254 | + blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); | |
255 | + blk_queue_io_opt(mddev->queue, | |
256 | + (mddev->chunk_sectors << 9) * mddev->raid_disks); | |
257 | + | |
253 | 258 | printk(KERN_INFO "raid0: done.\n"); |
254 | 259 | mddev->private = conf; |
255 | 260 | return 0; |
drivers/md/raid1.c
... | ... | @@ -1123,8 +1123,8 @@ |
1123 | 1123 | for (mirror = first; mirror <= last; mirror++) |
1124 | 1124 | if ( !(p=conf->mirrors+mirror)->rdev) { |
1125 | 1125 | |
1126 | - blk_queue_stack_limits(mddev->queue, | |
1127 | - rdev->bdev->bd_disk->queue); | |
1126 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
1127 | + rdev->data_offset << 9); | |
1128 | 1128 | /* as we don't honour merge_bvec_fn, we must never risk |
1129 | 1129 | * violating it, so limit ->max_sector to one PAGE, as |
1130 | 1130 | * a one page request is never in violation. |
... | ... | @@ -1988,9 +1988,8 @@ |
1988 | 1988 | disk = conf->mirrors + disk_idx; |
1989 | 1989 | |
1990 | 1990 | disk->rdev = rdev; |
1991 | - | |
1992 | - blk_queue_stack_limits(mddev->queue, | |
1993 | - rdev->bdev->bd_disk->queue); | |
1991 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
1992 | + rdev->data_offset << 9); | |
1994 | 1993 | /* as we don't honour merge_bvec_fn, we must never risk |
1995 | 1994 | * violating it, so limit ->max_sector to one PAGE, as |
1996 | 1995 | * a one page request is never in violation. |
drivers/md/raid10.c
... | ... | @@ -1151,8 +1151,8 @@ |
1151 | 1151 | for ( ; mirror <= last ; mirror++) |
1152 | 1152 | if ( !(p=conf->mirrors+mirror)->rdev) { |
1153 | 1153 | |
1154 | - blk_queue_stack_limits(mddev->queue, | |
1155 | - rdev->bdev->bd_disk->queue); | |
1154 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
1155 | + rdev->data_offset << 9); | |
1156 | 1156 | /* as we don't honour merge_bvec_fn, we must never risk |
1157 | 1157 | * violating it, so limit ->max_sector to one PAGE, as |
1158 | 1158 | * a one page request is never in violation. |
... | ... | @@ -2044,7 +2044,7 @@ |
2044 | 2044 | static int run(mddev_t *mddev) |
2045 | 2045 | { |
2046 | 2046 | conf_t *conf; |
2047 | - int i, disk_idx; | |
2047 | + int i, disk_idx, chunk_size; | |
2048 | 2048 | mirror_info_t *disk; |
2049 | 2049 | mdk_rdev_t *rdev; |
2050 | 2050 | int nc, fc, fo; |
... | ... | @@ -2130,6 +2130,14 @@ |
2130 | 2130 | spin_lock_init(&conf->device_lock); |
2131 | 2131 | mddev->queue->queue_lock = &conf->device_lock; |
2132 | 2132 | |
2133 | + chunk_size = mddev->chunk_sectors << 9; | |
2134 | + blk_queue_io_min(mddev->queue, chunk_size); | |
2135 | + if (conf->raid_disks % conf->near_copies) | |
2136 | + blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks); | |
2137 | + else | |
2138 | + blk_queue_io_opt(mddev->queue, chunk_size * | |
2139 | + (conf->raid_disks / conf->near_copies)); | |
2140 | + | |
2133 | 2141 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2134 | 2142 | disk_idx = rdev->raid_disk; |
2135 | 2143 | if (disk_idx >= mddev->raid_disks |
... | ... | @@ -2138,9 +2146,8 @@ |
2138 | 2146 | disk = conf->mirrors + disk_idx; |
2139 | 2147 | |
2140 | 2148 | disk->rdev = rdev; |
2141 | - | |
2142 | - blk_queue_stack_limits(mddev->queue, | |
2143 | - rdev->bdev->bd_disk->queue); | |
2149 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
2150 | + rdev->data_offset << 9); | |
2144 | 2151 | /* as we don't honour merge_bvec_fn, we must never risk |
2145 | 2152 | * violating it, so limit ->max_sector to one PAGE, as |
2146 | 2153 | * a one page request is never in violation. |
drivers/md/raid5.c
... | ... | @@ -4452,7 +4452,7 @@ |
4452 | 4452 | static int run(mddev_t *mddev) |
4453 | 4453 | { |
4454 | 4454 | raid5_conf_t *conf; |
4455 | - int working_disks = 0; | |
4455 | + int working_disks = 0, chunk_size; | |
4456 | 4456 | mdk_rdev_t *rdev; |
4457 | 4457 | |
4458 | 4458 | if (mddev->recovery_cp != MaxSector) |
... | ... | @@ -4607,6 +4607,14 @@ |
4607 | 4607 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
4608 | 4608 | |
4609 | 4609 | blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); |
4610 | + chunk_size = mddev->chunk_sectors << 9; | |
4611 | + blk_queue_io_min(mddev->queue, chunk_size); | |
4612 | + blk_queue_io_opt(mddev->queue, chunk_size * | |
4613 | + (conf->raid_disks - conf->max_degraded)); | |
4614 | + | |
4615 | + list_for_each_entry(rdev, &mddev->disks, same_set) | |
4616 | + disk_stack_limits(mddev->gendisk, rdev->bdev, | |
4617 | + rdev->data_offset << 9); | |
4610 | 4618 | |
4611 | 4619 | return 0; |
4612 | 4620 | abort: |