block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	elevator_t *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}
/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
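
/*
 * Look up a registered io scheduler by name. The caller must hold
 * elv_list_lock, since this walks elv_list without taking it.
 */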
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}
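
/*
 * Find an elevator type and take a reference on its module, so the io
 * scheduler cannot be unloaded while it is in use.
 */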
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];
static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;
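
/*
 * Allocate an elevator_t for q along with the hash table used for
 * back-merge lookups, dropping the type reference again on failure.
 */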
static elevator_t *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	elevator_t *eq;
	int i;

	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj);
	kobject_set_name(&eq->kobj, "%s", "iosched");
	eq->kobj.ktype = &elv_ktype;
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);

	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}
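
/*
 * Set up the io scheduler for q: use the name the caller passed in,
 * else the boot-time "elevator=" choice, else CONFIG_DEFAULT_IOSCHED,
 * falling back to noop if even that cannot be found.
 */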
int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name && !(e = elevator_get(name)))
		return -EINVAL;

	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
		printk("I/O scheduler %s not found\n", chosen_elevator);

	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printk("Default I/O scheduler not found, using no-op\n");
		e = elevator_get("noop");
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);
void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}
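
/*
 * Back-merge hash: requests are hashed on their end sector
 * (rq_hash_key), so a bio that begins where a pending request ends can
 * be found without scanning the whole queue.
 */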
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	elevator_t *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort inserted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
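
/*
 * See if bio can be merged with a pending request: try the one-hit
 * cache (q->last_merge) first, then a back merge via the rqhash, and
 * finally ask the io scheduler itself (e.g. a front merge found in its
 * sector-sorted RB tree).
 */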
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}
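
/*
 * Place rq on the dispatch queue or hand it to the io scheduler,
 * depending on 'where', then unplug the queue if enough requests have
 * accumulated.
 */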
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	blk_add_trace_rq(q, rq, BLK_TA_INSERT);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
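
/*
 * Grab the first request on the dispatch list, asking the io scheduler
 * to dispatch more requests whenever the list runs empty.
 */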
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		/*
		 * Kill the empty barrier place holder, the driver must
		 * not ever see it.
		 */
		if (blk_empty_barrier(rq)) {
			end_queued_request(rq, 1);
			continue;
		}
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			end_queued_request(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n",
			       __FUNCTION__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(elv_next_request);
void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and it being freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}
EXPORT_SYMBOL(elv_dequeue_request);
int elv_queue_empty(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}
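
/*
 * sysfs interface: each io scheduler's tunables appear under
 * /sys/block/<dev>/queue/iosched, with e->sysfs_lock serializing
 * access against elevator switches.
 */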
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	e->kobj.parent = &q->kobj;

	error = kobject_add(&e->kobj);
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}
static void __elv_unregister_queue(elevator_t *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

int elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 0;
}
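
/*
 * sysfs hook: writing an io scheduler name to
 * /sys/block/<dev>/queue/scheduler switches the queue over to it.
 */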
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);