block/mq-deadline.c
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	spinlock_t zone_lock;
	struct list_head dispatch;
};

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	rq = dd->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * either there are no reads, or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
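
/*
 * Summarizing the logic above, __dd_dispatch_request() picks a request
 * in the following order (a condensed restatement, not extra behavior):
 *
 *  1. anything parked on dd->dispatch (at-head or passthrough inserts);
 *  2. the sector-ordered successor of the last dispatch, as long as the
 *     current batch has fewer than fifo_batch requests;
 *  3. otherwise a new batch is started: reads are preferred, unless
 *     writes have already been passed over writes_starved times;
 *  4. within the chosen direction: the FIFO head if a deadline expired
 *     (or no cached next request exists), else the sector-sorted next.
 */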

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 *
 * For a zoned block device, __dd_dispatch_request() may return NULL
 * if all the queued write requests are directed at zones that are already
 * locked due to on-going write requests. In this case, make sure to mark
 * the queue as needing a restart to ensure that the queue is run again
 * and the pending writes dispatched once the target zones for the ongoing
 * write requests are unlocked in dd_finish_request().
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(dd);
	if (!rq && blk_queue_is_zoned(hctx->queue) &&
	    !list_empty(&dd->fifo_list[WRITE]))
		blk_mq_sched_mark_restart_hctx(hctx);
	spin_unlock(&dd->lock);

	return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}

static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	blk_mq_sched_request_inserted(rq);

	if (at_head || blk_rq_is_passthrough(rq)) {
		if (at_head)
			list_add(&rq->queuelist, &dd->dispatch);
		else
			list_add_tail(&rq->queuelist, &dd->dispatch);
	} else {
		deadline_add_rq_rb(dd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}
}

static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}
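
/*
 * Note how the two insertion paths above mirror the two dispatch sources
 * in __dd_dispatch_request(): at-head and passthrough requests bypass the
 * scheduler state entirely via dd->dispatch, while all other requests
 * enter both the sector-sorted rbtree and the expiry-ordered FIFO for
 * their data direction.
 */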

/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq, struct bio *bio)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}
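
/*
 * Together with blk_req_zone_write_lock() in __dd_dispatch_request() and
 * the unlock-on-requeue in dd_insert_request(), this completes the zone
 * write locking life cycle: a write aimed at a sequential zone takes the
 * zone lock when dispatched and drops it when it completes (here) or is
 * requeued, so at most one write per zone is in flight at any time.
 */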

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
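
/*
 * The attributes above appear under /sys/block/<dev>/queue/iosched/ while
 * mq-deadline is the active scheduler. For example ("sda" is only a
 * placeholder device; the expire values are shown and stored in
 * milliseconds):
 *
 *	cat /sys/block/sda/queue/iosched/read_expire
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 *	echo 32 > /sys/block/sda/queue/iosched/fifo_batch
 */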

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)				\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct request *rq = dd->next_rq[ddir];				\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start	= deadline_dispatch_start,
	.next	= deadline_dispatch_next,
	.stop	= deadline_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)						\
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops},	\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
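
/*
 * With CONFIG_BLK_DEBUG_FS enabled, the attributes above are exposed via
 * the blk-mq debugfs hierarchy, typically under
 * /sys/kernel/debug/block/<dev>/sched/: read_fifo_list, write_next_rq,
 * batching, starved and dispatch, among others.
 */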

static struct elevator_type mq_deadline = {
	.ops.mq = {
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_queue,
		.exit_sched		= dd_exit_queue,
	},

	.uses_mq	= true,
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_owner = THIS_MODULE,
};

MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");
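
/*
 * Example usage ("sda" is only a placeholder): the scheduler is selected
 * per request queue at runtime, and the "deadline" alias registered above
 * resolves to mq-deadline on blk-mq devices as well:
 *
 *	echo mq-deadline > /sys/block/sda/queue/scheduler
 *	cat /sys/block/sda/queue/scheduler
 */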