Commit b8aca35af5e9fbc2e41a3ba1b2e66f24e80ca9b6

Authored by Jens Axboe
Committed by Jens Axboe
1 parent 21183b07ee

[PATCH] deadline-iosched: migrate to using the elevator rb functions

This removes the rbtree handling from deadline.

Signed-off-by: Jens Axboe <axboe@suse.de>

Showing 1 changed file with 34 additions and 136 deletions
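Note: the elv_rb_* helpers this patch switches to were added earlier in this series in block/elevator.c, operating on an rb_node embedded directly in struct request, so the io scheduler no longer needs a private node and key. A reconstructed sketch of the insert helper and the companion prototypes follows; treat it as approximate rather than verbatim elevator.c source.

	/*
	 * Sketch of the elevator core insert helper.  Requests are sorted by
	 * start sector; on a collision the existing request (an "alias") is
	 * returned instead of inserting the new one.
	 */
	struct request *elv_rb_add(struct rb_root *root, struct request *rq)
	{
		struct rb_node **p = &root->rb_node;
		struct rb_node *parent = NULL;
		struct request *__rq;

		while (*p) {
			parent = *p;
			__rq = rb_entry(parent, struct request, rb_node);

			if (rq->sector < __rq->sector)
				p = &(*p)->rb_left;
			else if (rq->sector > __rq->sector)
				p = &(*p)->rb_right;
			else
				return __rq;	/* alias at the same sector */
		}

		rb_link_node(&rq->rb_node, parent, p);
		rb_insert_color(&rq->rb_node, root);
		return NULL;			/* inserted cleanly */
	}

	/* companions used in the hunks below */
	void elv_rb_del(struct rb_root *root, struct request *rq);
	struct request *elv_rb_find(struct rb_root *root, sector_t sector);

The alias-on-collision return is what drives the retry loop in deadline_add_drq_rb() below: the alias is moved to the dispatch queue and the insert is retried.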

block/deadline-iosched.c
... ... @@ -57,12 +57,6 @@
57 57 * pre-request data.
58 58 */
59 59 struct deadline_rq {
60   - /*
61   - * rbtree index, key is the starting offset
62   - */
63   - struct rb_node rb_node;
64   - sector_t rb_key;
65   -
66 60 struct request *request;
67 61  
68 62 /*
69 63 * expire fifo
70 64 */
71 65 struct list_head fifo;
72 66 unsigned long expires;
73 67 };
74 68 
... ... @@ -78,111 +72,41 @@
78 72  
79 73 #define RQ_DATA(rq) ((struct deadline_rq *) (rq)->elevator_private)
80 74  
81   -/*
82   - * rb tree support functions
83   - */
84   -#define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node)
85   -#define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)])
86   -#define rq_rb_key(rq) (rq)->sector
  75 +#define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))])
  76 +#define DRQ_RB_ROOT(dd, drq) RQ_RB_ROOT(dd, (drq)->request)
87 77  
88   -static struct deadline_rq *
89   -__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
90   -{
91   - struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
92   - struct rb_node *parent = NULL;
93   - struct deadline_rq *__drq;
94   -
95   - while (*p) {
96   - parent = *p;
97   - __drq = rb_entry_drq(parent);
98   -
99   - if (drq->rb_key < __drq->rb_key)
100   - p = &(*p)->rb_left;
101   - else if (drq->rb_key > __drq->rb_key)
102   - p = &(*p)->rb_right;
103   - else
104   - return __drq;
105   - }
106   -
107   - rb_link_node(&drq->rb_node, parent, p);
108   - return NULL;
109   -}
110   -
111 78 static void
112   -deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
  79 +deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
113 80 {
114   - struct deadline_rq *__alias;
  81 + struct rb_root *root = RQ_RB_ROOT(dd, rq);
  82 + struct request *__alias;
115 83  
116   - drq->rb_key = rq_rb_key(drq->request);
117   -
118 84 retry:
119   - __alias = __deadline_add_drq_rb(dd, drq);
120   - if (!__alias) {
121   - rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
122   - return;
  85 + __alias = elv_rb_add(root, rq);
  86 + if (unlikely(__alias)) {
  87 + deadline_move_request(dd, RQ_DATA(__alias));
  88 + goto retry;
123 89 }
124   -
125   - deadline_move_request(dd, __alias);
126   - goto retry;
127 90 }
128 91  
129 92 static inline void
130 93 deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
131 94 {
132   - const int data_dir = rq_data_dir(drq->request);
  95 + struct request *rq = drq->request;
  96 + const int data_dir = rq_data_dir(rq);
133 97  
134 98 if (dd->next_drq[data_dir] == drq) {
135   - struct rb_node *rbnext = rb_next(&drq->rb_node);
  99 + struct rb_node *rbnext = rb_next(&rq->rb_node);
136 100  
137 101 dd->next_drq[data_dir] = NULL;
138 102 if (rbnext)
139   - dd->next_drq[data_dir] = rb_entry_drq(rbnext);
  103 + dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
140 104 }
141 105  
142   - BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
143   - rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
144   - RB_CLEAR_NODE(&drq->rb_node);
  106 + elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
145 107 }
146 108  
147   -static struct request *
148   -deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
149   -{
150   - struct rb_node *n = dd->sort_list[data_dir].rb_node;
151   - struct deadline_rq *drq;
152   -
153   - while (n) {
154   - drq = rb_entry_drq(n);
155   -
156   - if (sector < drq->rb_key)
157   - n = n->rb_left;
158   - else if (sector > drq->rb_key)
159   - n = n->rb_right;
160   - else
161   - return drq->request;
162   - }
163   -
164   - return NULL;
165   -}
166   -
167 109 /*
168   - * deadline_find_first_drq finds the first (lowest sector numbered) request
169   - * for the specified data_dir. Used to sweep back to the start of the disk
170   - * (1-way elevator) after we process the last (highest sector) request.
171   - */
172   -static struct deadline_rq *
173   -deadline_find_first_drq(struct deadline_data *dd, int data_dir)
174   -{
175   - struct rb_node *n = dd->sort_list[data_dir].rb_node;
176   -
177   - for (;;) {
178   - if (n->rb_left == NULL)
179   - return rb_entry_drq(n);
180   -
181   - n = n->rb_left;
182   - }
183   -}
184   -
185   -/*
186 110 * add drq to rbtree and fifo
187 111 */
188 112 static void
... ... @@ -192,7 +116,7 @@
192 116 struct deadline_rq *drq = RQ_DATA(rq);
193 117 const int data_dir = rq_data_dir(drq->request);
194 118  
195   - deadline_add_drq_rb(dd, drq);
  119 + deadline_add_drq_rb(dd, rq);
196 120  
197 121 /*
198 122 * set expire time (only used for reads) and add to fifo list
199 123 */
200 124 drq->expires = jiffies + dd->fifo_expire[data_dir];
... ... @@ -224,11 +148,11 @@
224 148 * check for front merge
225 149 */
226 150 if (dd->front_merges) {
227   - sector_t rb_key = bio->bi_sector + bio_sectors(bio);
  151 + sector_t sector = bio->bi_sector + bio_sectors(bio);
228 152  
229   - __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
  153 + __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
230 154 if (__rq) {
231   - BUG_ON(rb_key != rq_rb_key(__rq));
  155 + BUG_ON(sector != __rq->sector);
232 156  
233 157 if (elv_rq_merge_ok(__rq, bio)) {
234 158 ret = ELEVATOR_FRONT_MERGE;
235 159 goto out;
236 160 }
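elv_rb_find() generalizes the deadline_find_drq_rb() walk deleted above, doing the same binary search on start sector but over requests rather than deadline_rqs. A sketch under that assumption:

	struct request *elv_rb_find(struct rb_root *root, sector_t sector)
	{
		struct rb_node *n = root->rb_node;
		struct request *rq;

		while (n) {
			rq = rb_entry(n, struct request, rb_node);

			if (sector < rq->sector)
				n = n->rb_left;
			else if (sector > rq->sector)
				n = n->rb_right;
			else
				return rq;
		}

		return NULL;
	}

A front merge candidate is a request that starts exactly where the bio ends, hence the bio->bi_sector + bio_sectors(bio) lookup key and the BUG_ON() cross-check.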
... ... @@ -243,17 +167,17 @@
243 167 return ret;
244 168 }
245 169  
246   -static void deadline_merged_request(request_queue_t *q, struct request *req)
  170 +static void deadline_merged_request(request_queue_t *q, struct request *req,
  171 + int type)
247 172 {
248 173 struct deadline_data *dd = q->elevator->elevator_data;
249   - struct deadline_rq *drq = RQ_DATA(req);
250 174  
251 175 /*
252 176 * if the merge was a front merge, we need to reposition request
253 177 */
254   - if (rq_rb_key(req) != drq->rb_key) {
255   - deadline_del_drq_rb(dd, drq);
256   - deadline_add_drq_rb(dd, drq);
  178 + if (type == ELEVATOR_FRONT_MERGE) {
  179 + elv_rb_del(RQ_RB_ROOT(dd, req), req);
  180 + deadline_add_drq_rb(dd, req);
257 181 }
258 182 }
259 183  
260 184 static void
... ... @@ -261,18 +185,12 @@
261 185 deadline_merged_requests(request_queue_t *q, struct request *req,
262 186 struct request *next)
263 187 {
264   - struct deadline_data *dd = q->elevator->elevator_data;
265 188 struct deadline_rq *drq = RQ_DATA(req);
266 189 struct deadline_rq *dnext = RQ_DATA(next);
267 190  
268 191 BUG_ON(!drq);
269 192 BUG_ON(!dnext);
270 193  
271   - if (rq_rb_key(req) != drq->rb_key) {
272   - deadline_del_drq_rb(dd, drq);
273   - deadline_add_drq_rb(dd, drq);
274   - }
275   -
276 194 /*
277 195 * if dnext expires before drq, assign its expire time to drq
278 196 */
279 197 if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
... ... @@ -308,14 +226,15 @@
308 226 static void
309 227 deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
310 228 {
311   - const int data_dir = rq_data_dir(drq->request);
312   - struct rb_node *rbnext = rb_next(&drq->rb_node);
  229 + struct request *rq = drq->request;
  230 + const int data_dir = rq_data_dir(rq);
  231 + struct rb_node *rbnext = rb_next(&rq->rb_node);
313 232  
314 233 dd->next_drq[READ] = NULL;
315 234 dd->next_drq[WRITE] = NULL;
316 235  
317 236 if (rbnext)
318   - dd->next_drq[data_dir] = rb_entry_drq(rbnext);
  237 + dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
319 238  
320 239 dd->last_sector = drq->request->sector + drq->request->nr_sectors;
321 240  
322 241 /*
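rb_entry_rq() stands in for the old rb_entry_drq(): it maps an rb_node back to the struct request embedding it, and RQ_DATA() then recovers the deadline_rq via ->elevator_private. Assuming the definition this series adds to include/linux/blkdev.h, the next_drq bookkeeping expands roughly to:

	/* assumed blkdev.h definition from this series */
	#define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)

	/* "next request in sector order", if any */
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));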
... ... @@ -426,13 +345,17 @@
426 345 */
427 346 drq = dd->next_drq[data_dir];
428 347 } else {
  348 + struct rb_node *n;
  349 +
429 350 /*
430 351 * The last req was the other direction or we have run out of
431 352 * higher-sectored requests. Go back to the lowest sectored
432 353 * request (1 way elevator) and start a new batch.
433 354 */
434 355 dd->batching = 0;
435   - drq = deadline_find_first_drq(dd, data_dir);
  356 + n = rb_first(&dd->sort_list[data_dir]);
  357 + if (n)
  358 + drq = RQ_DATA(rb_entry_rq(n));
436 359 }
437 360  
438 361 dispatch_request:
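The deleted deadline_find_first_drq() open-coded a leftmost-node walk and assumed a non-empty tree; rb_first() performs the same walk with an explicit empty-tree check, which is why the replacement guards with if (n). For reference, rb_first() is roughly:

	struct rb_node *rb_first(struct rb_root *root)
	{
		struct rb_node *n = root->rb_node;

		if (!n)
			return NULL;	/* empty tree: nothing to sweep back to */
		while (n->rb_left)
			n = n->rb_left;	/* leftmost node = lowest start sector */
		return n;
	}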
... ... @@ -453,30 +376,6 @@
453 376 && list_empty(&dd->fifo_list[READ]);
454 377 }
455 378  
456   -static struct request *
457   -deadline_former_request(request_queue_t *q, struct request *rq)
458   -{
459   - struct deadline_rq *drq = RQ_DATA(rq);
460   - struct rb_node *rbprev = rb_prev(&drq->rb_node);
461   -
462   - if (rbprev)
463   - return rb_entry_drq(rbprev)->request;
464   -
465   - return NULL;
466   -}
467   -
468   -static struct request *
469   -deadline_latter_request(request_queue_t *q, struct request *rq)
470   -{
471   - struct deadline_rq *drq = RQ_DATA(rq);
472   - struct rb_node *rbnext = rb_next(&drq->rb_node);
473   -
474   - if (rbnext)
475   - return rb_entry_drq(rbnext)->request;
476   -
477   - return NULL;
478   -}
479   -
480 379 static void deadline_exit_queue(elevator_t *e)
481 380 {
482 381 struct deadline_data *dd = e->elevator_data;
... ... @@ -542,7 +441,6 @@
542 441 drq = mempool_alloc(dd->drq_pool, gfp_mask);
543 442 if (drq) {
544 443 memset(drq, 0, sizeof(*drq));
545   - RB_CLEAR_NODE(&drq->rb_node);
546 444 drq->request = rq;
547 445  
548 446 INIT_LIST_HEAD(&drq->fifo);
... ... @@ -633,8 +531,8 @@
633 531 .elevator_dispatch_fn = deadline_dispatch_requests,
634 532 .elevator_add_req_fn = deadline_add_request,
635 533 .elevator_queue_empty_fn = deadline_queue_empty,
636   - .elevator_former_req_fn = deadline_former_request,
637   - .elevator_latter_req_fn = deadline_latter_request,
  534 + .elevator_former_req_fn = elv_rb_former_request,
  535 + .elevator_latter_req_fn = elv_rb_latter_request,
638 536 .elevator_set_req_fn = deadline_set_request,
639 537 .elevator_put_req_fn = deadline_put_request,
640 538 .elevator_init_fn = deadline_init_queue,
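With the sort tree keyed on rq->sector and the node embedded in struct request, the former/latter hooks become fully generic. The elevator core versions wired up above are presumably little more than the following sketch:

	struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
	{
		struct rb_node *rbprev = rb_prev(&rq->rb_node);

		if (rbprev)
			return rb_entry_rq(rbprev);

		return NULL;
	}

	struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
	{
		struct rb_node *rbnext = rb_next(&rq->rb_node);

		if (rbnext)
			return rb_entry_rq(rbnext);

		return NULL;
	}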