Commit defd94b75409b983f94548ea2f52ff5787ddb848

Authored by Mike Christie
Committed by James Bottomley
1 parent 8b05b773b6

[SCSI] separate max_sectors from max_hw_sectors

- export __blk_put_request and blk_execute_rq_nowait,
needed for async REQ_BLOCK_PC requests
- separate max_hw_sectors and max_sectors for the block/scsi_ioctl.c and
SG_IO bio.c helpers, per Jens's last comments. Since block/scsi_ioctl.c
SG_IO was already testing against max_sectors, and SCSI-ml was setting
max_sectors and max_hw_sectors to the same value, this does not change
any SCSI SG_IO behavior. It only prepares ll_rw_blk.c, scsi_ioctl.c and
bio.c for when SCSI-ml begins to set a valid max_hw_sectors for all
LLDs. Today, if an LLD does not set it, SCSI-ml sets it to a safe
default, and some LLDs set it to an artificially low value to work
around memory and feedback issues.

Note: Since we now cap max_sectors to BLK_DEF_MAX_SECTORS, which is 1024,
drivers that used to call blk_queue_max_sectors with a larger value will
now see fs requests capped to BLK_DEF_MAX_SECTORS.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
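
To illustrate the new split, here is a minimal sketch (the function name
and the 2048-sector limit are hypothetical, not from this patch):

	/* Hypothetical LLD queue setup. */
	static void example_lld_init_queue(request_queue_t *q)
	{
		/*
		 * The hardware can move 2048 sectors (1 MiB) per command.
		 * After this patch the call below yields:
		 *   q->max_hw_sectors = 2048 (bound for REQ_BLOCK_PC/SG_IO)
		 *   q->max_sectors    = 1024 (BLK_DEF_MAX_SECTORS, the
		 *                             bound for fs requests)
		 */
		blk_queue_max_sectors(q, 2048);
	}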

Showing 6 changed files with 42 additions and 21 deletions

block/ll_rw_blk.c
... ... @@ -239,7 +239,7 @@
239 239 q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
240 240 q->backing_dev_info.state = 0;
241 241 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
242   - blk_queue_max_sectors(q, MAX_SECTORS);
  242 + blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
243 243 blk_queue_hardsect_size(q, 512);
244 244 blk_queue_dma_alignment(q, 511);
245 245 blk_queue_congestion_threshold(q);
... ... @@ -555,7 +555,12 @@
555 555 printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
556 556 }
557 557  
558   - q->max_sectors = q->max_hw_sectors = max_sectors;
  558 + if (BLK_DEF_MAX_SECTORS > max_sectors)
  559 + q->max_hw_sectors = q->max_sectors = max_sectors;
  560 + else {
  561 + q->max_sectors = BLK_DEF_MAX_SECTORS;
  562 + q->max_hw_sectors = max_sectors;
  563 + }
559 564 }
560 565  
561 566 EXPORT_SYMBOL(blk_queue_max_sectors);
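
The setter now splits the two limits only when the requested value
exceeds the default. A sketch of the resulting values, assuming the
hunk above:

	blk_queue_max_sectors(q, 128);
	/* q->max_sectors == q->max_hw_sectors == 128, as before */

	blk_queue_max_sectors(q, 8192);
	/* q->max_sectors == 1024 (BLK_DEF_MAX_SECTORS),
	   q->max_hw_sectors == 8192 */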
... ... @@ -657,8 +662,8 @@
657 662 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
658 663 {
659 664 /* zero is "infinity" */
660   - t->max_sectors = t->max_hw_sectors =
661   - min_not_zero(t->max_sectors,b->max_sectors);
  665 + t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
  666 + t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
662 667  
663 668 t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
664 669 t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
665 670  
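
For stacked drivers (dm/md) the two limits now propagate independently,
so combining queues no longer collapses the hardware bound down to the
soft fs cap. A sketch of what a stacking driver's setup now computes
(queue names hypothetical):

	static void example_stack(request_queue_t *top, request_queue_t *bottom)
	{
		/*
		 * After this call:
		 *   top->max_sectors    = min_not_zero of the two soft caps
		 *   top->max_hw_sectors = min_not_zero of the two hw caps
		 */
		blk_queue_stack_limits(top, bottom);
	}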
... ... @@ -1293,9 +1298,15 @@
1293 1298 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
1294 1299 struct bio *bio)
1295 1300 {
  1301 + unsigned short max_sectors;
1296 1302 int len;
1297 1303  
1298   - if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
  1304 + if (unlikely(blk_pc_request(req)))
  1305 + max_sectors = q->max_hw_sectors;
  1306 + else
  1307 + max_sectors = q->max_sectors;
  1308 +
  1309 + if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
1299 1310 req->flags |= REQ_NOMERGE;
1300 1311 if (req == q->last_merge)
1301 1312 q->last_merge = NULL;
1302 1313  
... ... @@ -1325,9 +1336,16 @@
1325 1336 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
1326 1337 struct bio *bio)
1327 1338 {
  1339 + unsigned short max_sectors;
1328 1340 int len;
1329 1341  
1330   - if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
  1342 + if (unlikely(blk_pc_request(req)))
  1343 + max_sectors = q->max_hw_sectors;
  1344 + else
  1345 + max_sectors = q->max_sectors;
  1346 +
  1347 +
  1348 + if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
1331 1349 req->flags |= REQ_NOMERGE;
1332 1350 if (req == q->last_merge)
1333 1351 q->last_merge = NULL;
... ... @@ -2144,7 +2162,7 @@
2144 2162 struct bio *bio;
2145 2163 int reading;
2146 2164  
2147   - if (len > (q->max_sectors << 9))
  2165 + if (len > (q->max_hw_sectors << 9))
2148 2166 return -EINVAL;
2149 2167 if (!len || !ubuf)
2150 2168 return -EINVAL;
... ... @@ -2259,7 +2277,7 @@
2259 2277 {
2260 2278 struct bio *bio;
2261 2279  
2262   - if (len > (q->max_sectors << 9))
  2280 + if (len > (q->max_hw_sectors << 9))
2263 2281 return -EINVAL;
2264 2282 if (!len || !kbuf)
2265 2283 return -EINVAL;
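
With the mapping helpers now bounded by max_hw_sectors, a passthrough
caller may map a buffer up to the full hardware limit. A minimal sketch,
assuming buf was allocated by the caller and with error handling trimmed:

	static struct request *example_map_pc(request_queue_t *q, void *buf,
					      unsigned int len)
	{
		struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

		if (!rq)
			return NULL;
		rq->flags |= REQ_BLOCK_PC;
		/* len may now be as large as q->max_hw_sectors << 9 */
		if (blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL)) {
			blk_put_request(rq);
			return NULL;
		}
		return rq;
	}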
block/scsi_ioctl.c
... ... @@ -233,7 +233,7 @@
233 233 if (verify_command(file, cmd))
234 234 return -EPERM;
235 235  
236   - if (hdr->dxfer_len > (q->max_sectors << 9))
  236 + if (hdr->dxfer_len > (q->max_hw_sectors << 9))
237 237 return -EIO;
238 238  
239 239 if (hdr->dxfer_len)
drivers/md/dm-table.c
... ... @@ -638,7 +638,7 @@
638 638 static void check_for_valid_limits(struct io_restrictions *rs)
639 639 {
640 640 if (!rs->max_sectors)
641   - rs->max_sectors = MAX_SECTORS;
  641 + rs->max_sectors = SAFE_MAX_SECTORS;
642 642 if (!rs->max_phys_segments)
643 643 rs->max_phys_segments = MAX_PHYS_SEGMENTS;
644 644 if (!rs->max_hw_segments)
drivers/scsi/scsi_lib.c
... ... @@ -462,6 +462,7 @@
462 462 req = blk_get_request(sdev->request_queue, write, gfp);
463 463 if (!req)
464 464 goto free_sense;
  465 + req->flags |= REQ_BLOCK_PC | REQ_QUIET;
465 466  
466 467 if (use_sg)
467 468 err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
... ... @@ -477,7 +478,6 @@
477 478 req->sense_len = 0;
478 479 req->timeout = timeout;
479 480 req->retries = retries;
480   - req->flags |= REQ_BLOCK_PC | REQ_QUIET;
481 481 req->end_io_data = sioc;
482 482  
483 483 sioc->data = privdata;
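
Moving the flag assignment above the mapping appears to be required
because scsi_req_map_sg() grows the request through the merge path, and
ll_back_merge_fn() (above) only selects max_hw_sectors when
blk_pc_request() already sees REQ_BLOCK_PC; setting the flag afterward
would leave the mapping bounded by the lower max_sectors. The ordering,
in outline:

	req = blk_get_request(sdev->request_queue, write, gfp);
	req->flags |= REQ_BLOCK_PC | REQ_QUIET;	/* before any mapping */
	err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);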
fs/bio.c
... ... @@ -313,7 +313,8 @@
313 313 }
314 314  
315 315 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
316   - *page, unsigned int len, unsigned int offset)
  316 + *page, unsigned int len, unsigned int offset,
  317 + unsigned short max_sectors)
317 318 {
318 319 int retried_segments = 0;
319 320 struct bio_vec *bvec;
... ... @@ -327,7 +328,7 @@
327 328 if (bio->bi_vcnt >= bio->bi_max_vecs)
328 329 return 0;
329 330  
330   - if (((bio->bi_size + len) >> 9) > q->max_sectors)
  331 + if (((bio->bi_size + len) >> 9) > max_sectors)
331 332 return 0;
332 333  
333 334 /*
... ... @@ -401,7 +402,7 @@
401 402 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
402 403 unsigned int len, unsigned int offset)
403 404 {
404   - return __bio_add_page(q, bio, page, len, offset);
  405 + return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
405 406 }
406 407  
407 408 /**
... ... @@ -420,8 +421,8 @@
420 421 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
421 422 unsigned int offset)
422 423 {
423   - return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
424   - len, offset);
  424 + struct request_queue *q = bdev_get_queue(bio->bi_bdev);
  425 + return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
425 426 }
426 427  
427 428 struct bio_map_data {
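
Callers now pick the variant that matches the request type. A small
sketch (hypothetical wrapper; page, len and offset assumed valid):

	static int example_add_page(request_queue_t *q, struct bio *bio,
				    struct page *page, unsigned int len,
				    int is_pc)
	{
		if (is_pc)
			/* passthrough: may fill up to q->max_hw_sectors */
			return bio_add_pc_page(q, bio, page, len, 0);
		/* fs I/O: stops at the q->max_sectors soft cap */
		return bio_add_page(bio, page, len, 0);
	}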
... ... @@ -533,7 +534,7 @@
533 534 break;
534 535 }
535 536  
536   - if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
  537 + if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
537 538 ret = -EINVAL;
538 539 break;
539 540 }
... ... @@ -647,7 +648,8 @@
647 648 /*
648 649 * sorry...
649 650 */
650   - if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
  651 + if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
  652 + bytes)
651 653 break;
652 654  
653 655 len -= bytes;
... ... @@ -820,8 +822,8 @@
820 822 if (bytes > len)
821 823 bytes = len;
822 824  
823   - if (__bio_add_page(q, bio, virt_to_page(data), bytes,
824   - offset) < bytes)
  825 + if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
  826 + offset) < bytes)
825 827 break;
826 828  
827 829 data += bytes;
include/linux/blkdev.h
... ... @@ -702,7 +702,8 @@
702 702  
703 703 #define MAX_PHYS_SEGMENTS 128
704 704 #define MAX_HW_SEGMENTS 128
705   -#define MAX_SECTORS 255
  705 +#define SAFE_MAX_SECTORS 255
  706 +#define BLK_DEF_MAX_SECTORS 1024
706 707  
707 708 #define MAX_SEGMENT_SIZE 65536
708 709
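
In byte terms (512-byte sectors), the renamed and new defaults work
out to:

	SAFE_MAX_SECTORS    << 9 ==  255 * 512 == 130560 bytes (~127.5 KiB)
	BLK_DEF_MAX_SECTORS << 9 == 1024 * 512 == 524288 bytes (512 KiB)

so BLK_DEF_MAX_SECTORS caps fs requests at 512 KiB, while the old
255-sector value survives as the conservative fallback for queues whose
driver never calls blk_queue_max_sectors().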