Commit 2e662b65f05d550b6799ed6bfa9963b82279e6b7

Authored by Jens Axboe
Committed by Jens Axboe
1 parent 10fd48f237

[PATCH] elevator: abstract out the rbtree sort handling

The rbtree sort/lookup/reposition logic is mostly duplicated in
cfq/deadline/as, so move it to the elevator core. The io schedulers
still provide the actual rb root, as we don't want to impose any sort
of specific handling on the schedulers.

Introduce the helpers and rb_node in struct request to help migrate the
IO schedulers.

Signed-off-by: Jens Axboe <axboe@suse.de>
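
As a reading aid (not part of the patch), here is a minimal sketch of the intended split: the elevator core supplies the insert/lookup/removal mechanics, while each scheduler keeps its own rb_root and decides when to call them. The scheduler and field names (my_sched_data, sort_list, the my_sched_* functions) are hypothetical.

#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Hypothetical per-queue scheduler data: the scheduler still owns the
 * sort tree; the elevator core only touches it through the helpers.
 */
struct my_sched_data {
        struct rb_root sort_list;
};

/* Look up a queued request by start sector, e.g. as a merge candidate. */
static struct request *my_sched_find(struct my_sched_data *sd, sector_t sector)
{
        return elv_rb_find(&sd->sort_list, sector);
}

/* Drop a request from the sort tree once it leaves the scheduler. */
static void my_sched_del(struct my_sched_data *sd, struct request *rq)
{
        elv_rb_del(&sd->sort_list, rq);
}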

Showing 4 changed files with 130 additions and 19 deletions

block/elevator.c

... ... @@ -239,6 +239,8 @@
239 239 return ret;
240 240 }
241 241  
  242 +EXPORT_SYMBOL(elevator_init);
  243 +
242 244 void elevator_exit(elevator_t *e)
243 245 {
244 246 mutex_lock(&e->sysfs_lock);
... ... @@ -250,6 +252,8 @@
250 252 kobject_put(&e->kobj);
251 253 }
252 254  
  255 +EXPORT_SYMBOL(elevator_exit);
  256 +
253 257 static inline void __elv_rqhash_del(struct request *rq)
254 258 {
255 259 hlist_del_init(&rq->hash);
256 260  
... ... @@ -298,9 +302,68 @@
298 302 }
299 303  
300 304 /*
  305 + * RB-tree support functions for inserting/lookup/removal of requests
  306 + * in a sorted RB tree.
  307 + */
  308 +struct request *elv_rb_add(struct rb_root *root, struct request *rq)
  309 +{
  310 + struct rb_node **p = &root->rb_node;
  311 + struct rb_node *parent = NULL;
  312 + struct request *__rq;
  313 +
  314 + while (*p) {
  315 + parent = *p;
  316 + __rq = rb_entry(parent, struct request, rb_node);
  317 +
  318 + if (rq->sector < __rq->sector)
  319 + p = &(*p)->rb_left;
  320 + else if (rq->sector > __rq->sector)
  321 + p = &(*p)->rb_right;
  322 + else
  323 + return __rq;
  324 + }
  325 +
  326 + rb_link_node(&rq->rb_node, parent, p);
  327 + rb_insert_color(&rq->rb_node, root);
  328 + return NULL;
  329 +}
  330 +
  331 +EXPORT_SYMBOL(elv_rb_add);
  332 +
  333 +void elv_rb_del(struct rb_root *root, struct request *rq)
  334 +{
  335 + BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
  336 + rb_erase(&rq->rb_node, root);
  337 + RB_CLEAR_NODE(&rq->rb_node);
  338 +}
  339 +
  340 +EXPORT_SYMBOL(elv_rb_del);
  341 +
  342 +struct request *elv_rb_find(struct rb_root *root, sector_t sector)
  343 +{
  344 + struct rb_node *n = root->rb_node;
  345 + struct request *rq;
  346 +
  347 + while (n) {
  348 + rq = rb_entry(n, struct request, rb_node);
  349 +
  350 + if (sector < rq->sector)
  351 + n = n->rb_left;
  352 + else if (sector > rq->sector)
  353 + n = n->rb_right;
  354 + else
  355 + return rq;
  356 + }
  357 +
  358 + return NULL;
  359 +}
  360 +
  361 +EXPORT_SYMBOL(elv_rb_find);
  362 +
  363 +/*
301 364 * Insert rq into dispatch queue of q. Queue lock must be held on
302   - * entry. If sort != 0, rq is sort-inserted; otherwise, rq will be
303   - * appended to the dispatch queue. To be used by specific elevators.
  365 + * entry. rq is sort-inserted into the dispatch queue. To be used by
  366 + * specific elevators.
304 367 */
305 368 void elv_dispatch_sort(request_queue_t *q, struct request *rq)
306 369 {
307 370  
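
Note the return value of elv_rb_add(): rather than inserting a second request at an already occupied sector, it hands the existing request back and leaves the tree untouched, so callers must resolve the alias themselves. A sketch of the retry pattern a scheduler might use (the my_sched_* names are hypothetical, only the elv_rb_* helpers come from this patch):

static void my_sched_add_rq_rb(struct my_sched_data *sd, struct request *rq)
{
        struct request *alias;

        /*
         * Retry until the insert succeeds; each alias found must be
         * taken off the tree (e.g. moved straight to the dispatch
         * queue) before rq can occupy that sector slot.
         */
        while ((alias = elv_rb_add(&sd->sort_list, rq)) != NULL)
                my_sched_move_to_dispatch(sd, alias);   /* hypothetical */
}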
... ... @@ -335,8 +398,12 @@
335 398 list_add(&rq->queuelist, entry);
336 399 }
337 400  
  401 +EXPORT_SYMBOL(elv_dispatch_sort);
  402 +
338 403 /*
339   - * This should be in elevator.h, but that requires pulling in rq and q
  404 + * Insert rq into dispatch queue of q. Queue lock must be held on
  405 + * entry. rq is added to the back of the dispatch queue. To be used by
  406 + * specific elevators.
340 407 */
341 408 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
342 409 {
... ... @@ -352,6 +419,8 @@
352 419 list_add_tail(&rq->queuelist, &q->queue_head);
353 420 }
354 421  
  422 +EXPORT_SYMBOL(elv_dispatch_add_tail);
  423 +
355 424 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
356 425 {
357 426 elevator_t *e = q->elevator;
358 427  
359 428  
... ... @@ -384,14 +453,15 @@
384 453 return ELEVATOR_NO_MERGE;
385 454 }
386 455  
387   -void elv_merged_request(request_queue_t *q, struct request *rq)
  456 +void elv_merged_request(request_queue_t *q, struct request *rq, int type)
388 457 {
389 458 elevator_t *e = q->elevator;
390 459  
391 460 if (e->ops->elevator_merged_fn)
392   - e->ops->elevator_merged_fn(q, rq);
  461 + e->ops->elevator_merged_fn(q, rq, type);
393 462  
394   - elv_rqhash_reposition(q, rq);
  463 + if (type == ELEVATOR_BACK_MERGE)
  464 + elv_rqhash_reposition(q, rq);
395 465  
396 466 q->last_merge = rq;
397 467 }
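
The new type argument lets a scheduler distinguish the two merge directions: a back merge only extends the request's end sector, so the core's hash reposition above is enough, while a front merge changes rq->sector and therefore invalidates the request's position in the scheduler's sort tree. A hedged sketch of a merged hook under the new signature (everything except elv_rb_del() and ELEVATOR_FRONT_MERGE is made up):

static void my_sched_merged_request(request_queue_t *q, struct request *rq,
                                    int type)
{
        struct my_sched_data *sd = q->elevator->elevator_data;

        /*
         * A front merge moved the start sector, so re-sort the request
         * in the scheduler's own rb tree.
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(&sd->sort_list, rq);
                my_sched_add_rq_rb(sd, rq);     /* re-insert, see sketch above */
        }
}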
... ... @@ -577,6 +647,8 @@
577 647 elv_insert(q, rq, where);
578 648 }
579 649  
  650 +EXPORT_SYMBOL(__elv_add_request);
  651 +
580 652 void elv_add_request(request_queue_t *q, struct request *rq, int where,
581 653 int plug)
582 654 {
... ... @@ -587,6 +659,8 @@
587 659 spin_unlock_irqrestore(q->queue_lock, flags);
588 660 }
589 661  
  662 +EXPORT_SYMBOL(elv_add_request);
  663 +
590 664 static inline struct request *__elv_next_request(request_queue_t *q)
591 665 {
592 666 struct request *rq;
... ... @@ -670,6 +744,8 @@
670 744 return rq;
671 745 }
672 746  
  747 +EXPORT_SYMBOL(elv_next_request);
  748 +
673 749 void elv_dequeue_request(request_queue_t *q, struct request *rq)
674 750 {
675 751 BUG_ON(list_empty(&rq->queuelist));
... ... @@ -686,6 +762,8 @@
686 762 q->in_flight++;
687 763 }
688 764  
  765 +EXPORT_SYMBOL(elv_dequeue_request);
  766 +
689 767 int elv_queue_empty(request_queue_t *q)
690 768 {
691 769 elevator_t *e = q->elevator;
... ... @@ -699,6 +777,8 @@
699 777 return 1;
700 778 }
701 779  
  780 +EXPORT_SYMBOL(elv_queue_empty);
  781 +
702 782 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
703 783 {
704 784 elevator_t *e = q->elevator;
... ... @@ -1025,12 +1105,27 @@
1025 1105 return len;
1026 1106 }
1027 1107  
1028   -EXPORT_SYMBOL(elv_dispatch_sort);
1029   -EXPORT_SYMBOL(elv_add_request);
1030   -EXPORT_SYMBOL(__elv_add_request);
1031   -EXPORT_SYMBOL(elv_next_request);
1032   -EXPORT_SYMBOL(elv_dequeue_request);
1033   -EXPORT_SYMBOL(elv_queue_empty);
1034   -EXPORT_SYMBOL(elevator_exit);
1035   -EXPORT_SYMBOL(elevator_init);
  1108 +struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
  1109 +{
  1110 + struct rb_node *rbprev = rb_prev(&rq->rb_node);
  1111 +
  1112 + if (rbprev)
  1113 + return rb_entry_rq(rbprev);
  1114 +
  1115 + return NULL;
  1116 +}
  1117 +
  1118 +EXPORT_SYMBOL(elv_rb_former_request);
  1119 +
  1120 +struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
  1121 +{
  1122 + struct rb_node *rbnext = rb_next(&rq->rb_node);
  1123 +
  1124 + if (rbnext)
  1125 + return rb_entry_rq(rbnext);
  1126 +
  1127 + return NULL;
  1128 +}
  1129 +
  1130 +EXPORT_SYMBOL(elv_rb_latter_request);
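
With elv_rb_former_request() and elv_rb_latter_request() exported, a scheduler that keeps its requests in an elv_rb_* sort tree can delegate the former/latter lookups entirely to the core. An illustrative elevator_type fragment (the scheduler itself is hypothetical):

static struct elevator_type my_sched = {
        .ops = {
                .elevator_merged_fn     = my_sched_merged_request,
                .elevator_former_req_fn = elv_rb_former_request,
                .elevator_latter_req_fn = elv_rb_latter_request,
        },
        .elevator_name  = "mysched",
        .elevator_owner = THIS_MODULE,
};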
block/ll_rw_blk.c
... ... @@ -281,11 +281,12 @@
281 281 {
282 282 INIT_LIST_HEAD(&rq->queuelist);
283 283 INIT_LIST_HEAD(&rq->donelist);
284   - INIT_HLIST_NODE(&rq->hash);
285 284  
286 285 rq->errors = 0;
287 286 rq->rq_status = RQ_ACTIVE;
288 287 rq->bio = rq->biotail = NULL;
  288 + INIT_HLIST_NODE(&rq->hash);
  289 + RB_CLEAR_NODE(&rq->rb_node);
289 290 rq->ioprio = 0;
290 291 rq->buffer = NULL;
291 292 rq->ref_count = 1;
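
Clearing the rb_node in rq_init() means a request that was never sort-inserted is recognizable as such, which is what the BUG_ON(RB_EMPTY_NODE(...)) in elv_rb_del() relies on. It also allows a caller to guard removals instead of tracking membership separately; a tiny illustrative helper (hypothetical name):

static void my_sched_maybe_del(struct rb_root *root, struct request *rq)
{
        /* Only requests actually linked into a sort tree are erased. */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                elv_rb_del(root, rq);
}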
... ... @@ -2943,7 +2944,7 @@
2943 2944 req->ioprio = ioprio_best(req->ioprio, prio);
2944 2945 drive_stat_acct(req, nr_sectors, 0);
2945 2946 if (!attempt_back_merge(q, req))
2946   - elv_merged_request(q, req);
  2947 + elv_merged_request(q, req, el_ret);
2947 2948 goto out;
2948 2949  
2949 2950 case ELEVATOR_FRONT_MERGE:
... ... @@ -2970,7 +2971,7 @@
2970 2971 req->ioprio = ioprio_best(req->ioprio, prio);
2971 2972 drive_stat_acct(req, nr_sectors, 0);
2972 2973 if (!attempt_front_merge(q, req))
2973   - elv_merged_request(q, req);
  2974 + elv_merged_request(q, req, el_ret);
2974 2975 goto out;
2975 2976  
2976 2977 /* ELV_NO_MERGE: elevator says don't/can't merge. */
include/linux/blkdev.h
... ... @@ -230,6 +230,7 @@
230 230 struct bio *biotail;
231 231  
232 232 struct hlist_node hash; /* merge hash */
  233 + struct rb_node rb_node; /* sort/lookup */
233 234  
234 235 void *elevator_private;
235 236 void *completion_data;
include/linux/elevator.h
... ... @@ -6,7 +6,7 @@
6 6  
7 7 typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
8 8  
9   -typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
  9 +typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
10 10  
11 11 typedef int (elevator_dispatch_fn) (request_queue_t *, int);
12 12  
... ... @@ -96,7 +96,7 @@
96 96 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
97 97 extern void elv_merge_requests(request_queue_t *, struct request *,
98 98 struct request *);
99   -extern void elv_merged_request(request_queue_t *, struct request *);
  99 +extern void elv_merged_request(request_queue_t *, struct request *, int);
100 100 extern void elv_dequeue_request(request_queue_t *, struct request *);
101 101 extern void elv_requeue_request(request_queue_t *, struct request *);
102 102 extern int elv_queue_empty(request_queue_t *);
... ... @@ -127,6 +127,19 @@
127 127 extern int elv_rq_merge_ok(struct request *, struct bio *);
128 128  
129 129 /*
  130 + * Helper functions.
  131 + */
  132 +extern struct request *elv_rb_former_request(request_queue_t *, struct request *);
  133 +extern struct request *elv_rb_latter_request(request_queue_t *, struct request *);
  134 +
  135 +/*
  136 + * rb support functions.
  137 + */
  138 +extern struct request *elv_rb_add(struct rb_root *, struct request *);
  139 +extern void elv_rb_del(struct rb_root *, struct request *);
  140 +extern struct request *elv_rb_find(struct rb_root *, sector_t);
  141 +
  142 +/*
130 143 * Return values from elevator merger
131 144 */
132 145 #define ELEVATOR_NO_MERGE 0
... ... @@ -151,6 +164,7 @@
151 164 };
152 165  
153 166 #define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors)
  167 +#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
154 168  
155 169 #endif
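
Finally, the rb_entry_rq() macro gives schedulers a uniform way to get from an rb_node back to its request, e.g. when walking the sort tree in ascending sector order. A short illustrative walk (the function name and the assumption that the root is scheduler-owned are mine):

static void my_sched_check_order(struct rb_root *root)
{
        struct rb_node *node;
        sector_t last = 0;

        for (node = rb_first(root); node; node = rb_next(node)) {
                struct request *rq = rb_entry_rq(node);

                /* elv_rb_add() keys on ->sector, so in-order traversal
                 * visits requests in ascending sector order.
                 */
                BUG_ON(rq->sector < last);
                last = rq->sector;
        }
}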