Blame view
block/elevator.c
24.4 KB
1da177e4c
|
1 |
/* |
1da177e4c
|
2 3 4 5 |
* Block device elevator/IO-scheduler. * * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * |
0fe234795
|
6 |
* 30042000 Jens Axboe <axboe@kernel.dk> : |
1da177e4c
|
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 |
* * Split the elevator a bit so that it is possible to choose a different * one or even write a new "plug in". There are three pieces: * - elevator_fn, inserts a new request in the queue list * - elevator_merge_fn, decides whether a new buffer can be merged with * an existing request * - elevator_dequeue_fn, called when a request is taken off the active list * * 20082000 Dave Jones <davej@suse.de> : * Removed tests for max-bomb-segments, which was breaking elvtune * when run without -bN * * Jens: * - Rework again to work with bio instead of buffer_heads * - loose bi_dev comparisons, partition handling is right now * - completely modularize elevator setup and teardown * */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/bio.h> |
1da177e4c
|
30 31 32 33 |
#include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/compiler.h> |
cb98fc8bb
|
34 |
#include <linux/delay.h> |
2056a782f
|
35 |
#include <linux/blktrace_api.h> |
9817064b6
|
36 |
#include <linux/hash.h> |
0835da67c
|
37 |
#include <linux/uaccess.h> |
1da177e4c
|
38 |
|
55782138e
|
39 |
#include <trace/events/block.h> |
242f9dcb8
|
40 |
#include "blk.h" |
/* Registry of loaded I/O schedulers, protected by elv_list_lock. */
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.  Requests are hashed on the sector just past their
 * last sector (rq_hash_key) so that back-merge candidates can be found
 * with a single lookup.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
/* |
da7752650
|
55 56 57 58 59 |
* Query io scheduler to see if the current process issuing bio may be * merged with rq. */ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) { |
165125e1e
|
60 |
struct request_queue *q = rq->q; |
b374d18a4
|
61 |
struct elevator_queue *e = q->elevator; |
da7752650
|
62 63 64 65 66 67 68 69 |
if (e->ops->elevator_allow_merge_fn) return e->ops->elevator_allow_merge_fn(q, rq, bio); return 1; } /* |
1da177e4c
|
70 71 |
* can we safely merge with this request? */ |
72ed0bf60
|
72 |
int elv_rq_merge_ok(struct request *rq, struct bio *bio) |
1da177e4c
|
73 74 75 76 77 |
{ if (!rq_mergeable(rq)) return 0; /* |
e17fc0a1c
|
78 79 |
* Don't merge file system requests and discard requests */ |
7b6d91dae
|
80 |
if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD)) |
e17fc0a1c
|
81 82 83 |
return 0; /* |
8d57a98cc
|
84 85 86 87 88 89 |
* Don't merge discard requests and secure discard requests */ if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE)) return 0; /* |
1da177e4c
|
90 91 92 93 94 95 |
* different data direction or already started, don't merge */ if (bio_data_dir(bio) != rq_data_dir(rq)) return 0; /* |
da7752650
|
96 |
* must be same device and not a special request |
1da177e4c
|
97 |
*/ |
bb4067e34
|
98 |
if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special) |
da7752650
|
99 |
return 0; |
7ba1ba12e
|
100 101 102 103 104 |
/* * only merge integrity protected bio into ditto rq */ if (bio_integrity(bio) != blk_integrity_rq(rq)) return 0; |
da7752650
|
105 106 |
if (!elv_iosched_allow_merge(rq, bio)) return 0; |
1da177e4c
|
107 |
|
da7752650
|
108 |
return 1; |
1da177e4c
|
109 110 |
} EXPORT_SYMBOL(elv_rq_merge_ok); |
73c101011
|
111 |
int elv_try_merge(struct request *__rq, struct bio *bio) |
1da177e4c
|
112 113 114 115 116 117 118 |
{ int ret = ELEVATOR_NO_MERGE; /* * we can merge and sequence is ok, check if it's possible */ if (elv_rq_merge_ok(__rq, bio)) { |
83096ebf1
|
119 |
if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector) |
1da177e4c
|
120 |
ret = ELEVATOR_BACK_MERGE; |
83096ebf1
|
121 |
else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector) |
1da177e4c
|
122 123 124 125 126 |
ret = ELEVATOR_FRONT_MERGE; } return ret; } |
1da177e4c
|
127 |
|
1da177e4c
|
128 129 |
static struct elevator_type *elevator_find(const char *name) { |
a22b169df
|
130 |
struct elevator_type *e; |
1da177e4c
|
131 |
|
70cee26e0
|
132 |
list_for_each_entry(e, &elv_list, list) { |
a22b169df
|
133 134 |
if (!strcmp(e->elevator_name, name)) return e; |
1da177e4c
|
135 |
} |
1da177e4c
|
136 |
|
a22b169df
|
137 |
return NULL; |
1da177e4c
|
138 139 140 141 142 143 144 145 146 |
} static void elevator_put(struct elevator_type *e) { module_put(e->elevator_owner); } static struct elevator_type *elevator_get(const char *name) { |
2824bc932
|
147 |
struct elevator_type *e; |
1da177e4c
|
148 |
|
2a12dcd71
|
149 |
spin_lock(&elv_list_lock); |
2824bc932
|
150 151 |
e = elevator_find(name); |
e16409496
|
152 |
if (!e) { |
e16409496
|
153 |
spin_unlock(&elv_list_lock); |
490b94be0
|
154 |
request_module("%s-iosched", name); |
e16409496
|
155 156 157 |
spin_lock(&elv_list_lock); e = elevator_find(name); } |
2824bc932
|
158 159 |
if (e && !try_module_get(e->elevator_owner)) e = NULL; |
2a12dcd71
|
160 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
161 162 163 |
return e; } |
165125e1e
|
164 165 |
static void *elevator_init_queue(struct request_queue *q, struct elevator_queue *eq) |
1da177e4c
|
166 |
{ |
bb37b94c6
|
167 |
return eq->ops->elevator_init_fn(q); |
bc1c11697
|
168 |
} |
1da177e4c
|
169 |
|
165125e1e
|
170 |
static void elevator_attach(struct request_queue *q, struct elevator_queue *eq, |
bc1c11697
|
171 172 |
void *data) { |
1da177e4c
|
173 |
q->elevator = eq; |
bc1c11697
|
174 |
eq->elevator_data = data; |
1da177e4c
|
175 176 177 |
} static char chosen_elevator[16]; |
5f0039764
|
178 |
static int __init elevator_setup(char *str) |
1da177e4c
|
179 |
{ |
752a3b796
|
180 181 182 183 |
/* * Be backwards-compatible with previous kernels, so users * won't get the wrong elevator. */ |
492af6350
|
184 |
strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); |
9b41046cd
|
185 |
return 1; |
1da177e4c
|
186 187 188 |
} __setup("elevator=", elevator_setup); |
3d1ab40f4
|
189 |
static struct kobj_type elv_ktype; |
b374d18a4
|
190 |
static struct elevator_queue *elevator_alloc(struct request_queue *q, |
165125e1e
|
191 |
struct elevator_type *e) |
3d1ab40f4
|
192 |
{ |
b374d18a4
|
193 |
struct elevator_queue *eq; |
9817064b6
|
194 |
int i; |
b374d18a4
|
195 |
eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); |
9817064b6
|
196 197 |
if (unlikely(!eq)) goto err; |
9817064b6
|
198 199 |
eq->ops = &e->ops; eq->elevator_type = e; |
f9cb074bf
|
200 |
kobject_init(&eq->kobj, &elv_ktype); |
9817064b6
|
201 |
mutex_init(&eq->sysfs_lock); |
b5deef901
|
202 203 |
eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL, q->node); |
9817064b6
|
204 205 206 207 208 |
if (!eq->hash) goto err; for (i = 0; i < ELV_HASH_ENTRIES; i++) INIT_HLIST_HEAD(&eq->hash[i]); |
3d1ab40f4
|
209 |
return eq; |
9817064b6
|
210 211 212 213 |
err: kfree(eq); elevator_put(e); return NULL; |
3d1ab40f4
|
214 215 216 217 |
} static void elevator_release(struct kobject *kobj) { |
b374d18a4
|
218 |
struct elevator_queue *e; |
9817064b6
|
219 |
|
b374d18a4
|
220 |
e = container_of(kobj, struct elevator_queue, kobj); |
3d1ab40f4
|
221 |
elevator_put(e->elevator_type); |
9817064b6
|
222 |
kfree(e->hash); |
3d1ab40f4
|
223 224 |
kfree(e); } |
165125e1e
|
225 |
int elevator_init(struct request_queue *q, char *name) |
1da177e4c
|
226 227 228 |
{ struct elevator_type *e = NULL; struct elevator_queue *eq; |
bc1c11697
|
229 |
void *data; |
1da177e4c
|
230 |
|
1abec4fdb
|
231 232 |
if (unlikely(q->elevator)) return 0; |
cb98fc8bb
|
233 234 235 236 |
INIT_LIST_HEAD(&q->queue_head); q->last_merge = NULL; q->end_sector = 0; q->boundary_rq = NULL; |
cb98fc8bb
|
237 |
|
4eb166d98
|
238 239 240 241 242 |
if (name) { e = elevator_get(name); if (!e) return -EINVAL; } |
1da177e4c
|
243 |
|
4eb166d98
|
244 245 246 247 248 249 250 |
if (!e && *chosen_elevator) { e = elevator_get(chosen_elevator); if (!e) printk(KERN_ERR "I/O scheduler %s not found ", chosen_elevator); } |
248d5ca5e
|
251 |
|
4eb166d98
|
252 253 254 255 256 257 258 259 260 |
if (!e) { e = elevator_get(CONFIG_DEFAULT_IOSCHED); if (!e) { printk(KERN_ERR "Default I/O scheduler not found. " \ "Using noop. "); e = elevator_get("noop"); } |
5f0039764
|
261 |
} |
b5deef901
|
262 |
eq = elevator_alloc(q, e); |
3d1ab40f4
|
263 |
if (!eq) |
1da177e4c
|
264 |
return -ENOMEM; |
1da177e4c
|
265 |
|
bc1c11697
|
266 267 |
data = elevator_init_queue(q, eq); if (!data) { |
3d1ab40f4
|
268 |
kobject_put(&eq->kobj); |
bc1c11697
|
269 270 |
return -ENOMEM; } |
1da177e4c
|
271 |
|
bc1c11697
|
272 |
elevator_attach(q, eq, data); |
1abec4fdb
|
273 |
return 0; |
1da177e4c
|
274 |
} |
2e662b65f
|
275 |
EXPORT_SYMBOL(elevator_init); |
b374d18a4
|
276 |
void elevator_exit(struct elevator_queue *e) |
1da177e4c
|
277 |
{ |
3d1ab40f4
|
278 |
mutex_lock(&e->sysfs_lock); |
1da177e4c
|
279 280 |
if (e->ops->elevator_exit_fn) e->ops->elevator_exit_fn(e); |
3d1ab40f4
|
281 282 |
e->ops = NULL; mutex_unlock(&e->sysfs_lock); |
1da177e4c
|
283 |
|
3d1ab40f4
|
284 |
kobject_put(&e->kobj); |
1da177e4c
|
285 |
} |
2e662b65f
|
286 |
EXPORT_SYMBOL(elevator_exit); |
9817064b6
|
287 288 289 290 |
static inline void __elv_rqhash_del(struct request *rq) { hlist_del_init(&rq->hash); } |
165125e1e
|
291 |
static void elv_rqhash_del(struct request_queue *q, struct request *rq) |
9817064b6
|
292 293 294 295 |
{ if (ELV_ON_HASH(rq)) __elv_rqhash_del(rq); } |
165125e1e
|
296 |
static void elv_rqhash_add(struct request_queue *q, struct request *rq) |
9817064b6
|
297 |
{ |
b374d18a4
|
298 |
struct elevator_queue *e = q->elevator; |
9817064b6
|
299 300 301 302 |
BUG_ON(ELV_ON_HASH(rq)); hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); } |
165125e1e
|
303 |
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) |
9817064b6
|
304 305 306 307 |
{ __elv_rqhash_del(rq); elv_rqhash_add(q, rq); } |
165125e1e
|
308 |
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) |
9817064b6
|
309 |
{ |
b374d18a4
|
310 |
struct elevator_queue *e = q->elevator; |
9817064b6
|
311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 |
struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; struct hlist_node *entry, *next; struct request *rq; hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) { BUG_ON(!ELV_ON_HASH(rq)); if (unlikely(!rq_mergeable(rq))) { __elv_rqhash_del(rq); continue; } if (rq_hash_key(rq) == offset) return rq; } return NULL; } |
8922e16cf
|
329 |
/* |
2e662b65f
|
330 331 332 |
* RB-tree support functions for inserting/lookup/removal of requests * in a sorted RB tree. */ |
796d5116c
|
333 |
void elv_rb_add(struct rb_root *root, struct request *rq) |
2e662b65f
|
334 335 336 337 338 339 340 341 |
{ struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct request *__rq; while (*p) { parent = *p; __rq = rb_entry(parent, struct request, rb_node); |
83096ebf1
|
342 |
if (blk_rq_pos(rq) < blk_rq_pos(__rq)) |
2e662b65f
|
343 |
p = &(*p)->rb_left; |
796d5116c
|
344 |
else if (blk_rq_pos(rq) >= blk_rq_pos(__rq)) |
2e662b65f
|
345 |
p = &(*p)->rb_right; |
2e662b65f
|
346 347 348 349 |
} rb_link_node(&rq->rb_node, parent, p); rb_insert_color(&rq->rb_node, root); |
2e662b65f
|
350 |
} |
/* Remove rq from the sort tree; it must currently be linked. */
void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

/* Binary-search the sort tree for a request starting at sector. */
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
8922e16cf
|
381 |
* Insert rq into dispatch queue of q. Queue lock must be held on |
dbe7f76dd
|
382 |
* entry. rq is sort instead into the dispatch queue. To be used by |
2e662b65f
|
383 |
* specific elevators. |
8922e16cf
|
384 |
*/ |
165125e1e
|
385 |
void elv_dispatch_sort(struct request_queue *q, struct request *rq) |
8922e16cf
|
386 387 |
{ sector_t boundary; |
8922e16cf
|
388 |
struct list_head *entry; |
4eb166d98
|
389 |
int stop_flags; |
8922e16cf
|
390 |
|
06b86245c
|
391 392 |
if (q->last_merge == rq) q->last_merge = NULL; |
9817064b6
|
393 394 |
elv_rqhash_del(q, rq); |
15853af9f
|
395 |
q->nr_sorted--; |
06b86245c
|
396 |
|
1b47f531e
|
397 |
boundary = q->end_sector; |
02e031cbc
|
398 |
stop_flags = REQ_SOFTBARRIER | REQ_STARTED; |
8922e16cf
|
399 400 |
list_for_each_prev(entry, &q->queue_head) { struct request *pos = list_entry_rq(entry); |
33659ebba
|
401 402 |
if ((rq->cmd_flags & REQ_DISCARD) != (pos->cmd_flags & REQ_DISCARD)) |
e17fc0a1c
|
403 |
break; |
783660b2f
|
404 405 |
if (rq_data_dir(rq) != rq_data_dir(pos)) break; |
4eb166d98
|
406 |
if (pos->cmd_flags & stop_flags) |
8922e16cf
|
407 |
break; |
83096ebf1
|
408 409 |
if (blk_rq_pos(rq) >= boundary) { if (blk_rq_pos(pos) < boundary) |
8922e16cf
|
410 411 |
continue; } else { |
83096ebf1
|
412 |
if (blk_rq_pos(pos) >= boundary) |
8922e16cf
|
413 414 |
break; } |
83096ebf1
|
415 |
if (blk_rq_pos(rq) >= blk_rq_pos(pos)) |
8922e16cf
|
416 417 418 419 420 |
break; } list_add(&rq->queuelist, entry); } |
2e662b65f
|
421 |
EXPORT_SYMBOL(elv_dispatch_sort); |
9817064b6
|
422 |
/* |
2e662b65f
|
423 424 425 |
* Insert rq into dispatch queue of q. Queue lock must be held on * entry. rq is added to the back of the dispatch queue. To be used by * specific elevators. |
9817064b6
|
426 427 428 429 430 431 432 433 434 435 436 437 438 439 |
*/ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) { if (q->last_merge == rq) q->last_merge = NULL; elv_rqhash_del(q, rq); q->nr_sorted--; q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; list_add_tail(&rq->queuelist, &q->queue_head); } |
2e662b65f
|
440 |
EXPORT_SYMBOL(elv_dispatch_add_tail); |
165125e1e
|
441 |
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) |
1da177e4c
|
442 |
{ |
b374d18a4
|
443 |
struct elevator_queue *e = q->elevator; |
9817064b6
|
444 |
struct request *__rq; |
06b86245c
|
445 |
int ret; |
9817064b6
|
446 |
/* |
488991e28
|
447 448 449 450 451 452 453 454 455 |
* Levels of merges: * nomerges: No merges at all attempted * noxmerges: Only simple one-hit cache try * merges: All merge tries attempted */ if (blk_queue_nomerges(q)) return ELEVATOR_NO_MERGE; /* |
9817064b6
|
456 457 |
* First try one-hit cache. */ |
06b86245c
|
458 459 460 461 462 463 464 |
if (q->last_merge) { ret = elv_try_merge(q->last_merge, bio); if (ret != ELEVATOR_NO_MERGE) { *req = q->last_merge; return ret; } } |
1da177e4c
|
465 |
|
488991e28
|
466 |
if (blk_queue_noxmerges(q)) |
ac9fafa12
|
467 |
return ELEVATOR_NO_MERGE; |
9817064b6
|
468 469 470 471 472 473 474 475 |
/* * See if our hash lookup can find a potential backmerge. */ __rq = elv_rqhash_find(q, bio->bi_sector); if (__rq && elv_rq_merge_ok(__rq, bio)) { *req = __rq; return ELEVATOR_BACK_MERGE; } |
1da177e4c
|
476 477 478 479 480 |
if (e->ops->elevator_merge_fn) return e->ops->elevator_merge_fn(q, req, bio); return ELEVATOR_NO_MERGE; } |
5e84ea3a9
|
481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 |
/* * Attempt to do an insertion back merge. Only check for the case where * we can append 'rq' to an existing request, so we can throw 'rq' away * afterwards. * * Returns true if we merged, false otherwise */ static bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) { struct request *__rq; if (blk_queue_nomerges(q)) return false; /* * First try one-hit cache. */ if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) return true; if (blk_queue_noxmerges(q)) return false; /* * See if our hash lookup can find a potential backmerge. */ __rq = elv_rqhash_find(q, blk_rq_pos(rq)); if (__rq && blk_attempt_req_merge(q, __rq, rq)) return true; return false; } |
165125e1e
|
514 |
void elv_merged_request(struct request_queue *q, struct request *rq, int type) |
1da177e4c
|
515 |
{ |
b374d18a4
|
516 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
517 518 |
if (e->ops->elevator_merged_fn) |
2e662b65f
|
519 |
e->ops->elevator_merged_fn(q, rq, type); |
06b86245c
|
520 |
|
2e662b65f
|
521 522 |
if (type == ELEVATOR_BACK_MERGE) elv_rqhash_reposition(q, rq); |
9817064b6
|
523 |
|
06b86245c
|
524 |
q->last_merge = rq; |
1da177e4c
|
525 |
} |
165125e1e
|
526 |
void elv_merge_requests(struct request_queue *q, struct request *rq, |
1da177e4c
|
527 528 |
struct request *next) { |
b374d18a4
|
529 |
struct elevator_queue *e = q->elevator; |
5e84ea3a9
|
530 |
const int next_sorted = next->cmd_flags & REQ_SORTED; |
1da177e4c
|
531 |
|
5e84ea3a9
|
532 |
if (next_sorted && e->ops->elevator_merge_req_fn) |
1da177e4c
|
533 |
e->ops->elevator_merge_req_fn(q, rq, next); |
06b86245c
|
534 |
|
9817064b6
|
535 |
elv_rqhash_reposition(q, rq); |
9817064b6
|
536 |
|
5e84ea3a9
|
537 538 539 540 |
if (next_sorted) { elv_rqhash_del(q, next); q->nr_sorted--; } |
06b86245c
|
541 |
q->last_merge = rq; |
1da177e4c
|
542 |
} |
812d40264
|
543 544 545 546 547 548 549 550 |
void elv_bio_merged(struct request_queue *q, struct request *rq, struct bio *bio) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_bio_merged_fn) e->ops->elevator_bio_merged_fn(q, rq, bio); } |
165125e1e
|
551 |
void elv_requeue_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
552 |
{ |
1da177e4c
|
553 554 555 556 |
/* * it already went through dequeue, we need to decrement the * in_flight count again */ |
8922e16cf
|
557 |
if (blk_account_rq(rq)) { |
0a7ae2ff0
|
558 |
q->in_flight[rq_is_sync(rq)]--; |
33659ebba
|
559 |
if (rq->cmd_flags & REQ_SORTED) |
cad975164
|
560 |
elv_deactivate_rq(q, rq); |
8922e16cf
|
561 |
} |
1da177e4c
|
562 |
|
4aff5e233
|
563 |
rq->cmd_flags &= ~REQ_STARTED; |
1da177e4c
|
564 |
|
b710a4805
|
565 |
__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); |
1da177e4c
|
566 |
} |
26308eab6
|
567 |
void elv_drain_elevator(struct request_queue *q) |
15853af9f
|
568 569 570 571 572 573 574 575 576 577 578 579 580 |
{ static int printed; while (q->elevator->ops->elevator_dispatch_fn(q, 1)) ; if (q->nr_sorted == 0) return; if (printed++ < 10) { printk(KERN_ERR "%s: forced dispatching is broken " "(nr_sorted=%u), please report this ", q->elevator->elevator_type->elevator_name, q->nr_sorted); } } |
6c7e8cee6
|
581 582 583 |
/* * Call with queue lock held, interrupts disabled */ |
f600abe2d
|
584 |
void elv_quiesce_start(struct request_queue *q) |
6c7e8cee6
|
585 |
{ |
cd43e26f0
|
586 587 |
if (!q->elevator) return; |
6c7e8cee6
|
588 589 590 591 592 593 594 |
queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); /* * make sure we don't have any requests in flight */ elv_drain_elevator(q); while (q->rq.elvpriv) { |
24ecfbe27
|
595 |
__blk_run_queue(q); |
6c7e8cee6
|
596 597 598 599 600 601 |
spin_unlock_irq(q->queue_lock); msleep(10); spin_lock_irq(q->queue_lock); elv_drain_elevator(q); } } |
f600abe2d
|
602 |
void elv_quiesce_end(struct request_queue *q) |
6c7e8cee6
|
603 604 605 |
{ queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); } |
b710a4805
|
606 |
void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
1da177e4c
|
607 |
{ |
5f3ea37c7
|
608 |
trace_block_rq_insert(q, rq); |
2056a782f
|
609 |
|
1da177e4c
|
610 |
rq->q = q; |
b710a4805
|
611 612 613 614 615 616 617 618 |
if (rq->cmd_flags & REQ_SOFTBARRIER) { /* barriers are scheduling boundary, update end_sector */ if (rq->cmd_type == REQ_TYPE_FS || (rq->cmd_flags & REQ_DISCARD)) { q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; } } else if (!(rq->cmd_flags & REQ_ELVPRIV) && |
3aa72873f
|
619 620 |
(where == ELEVATOR_INSERT_SORT || where == ELEVATOR_INSERT_SORT_MERGE)) |
b710a4805
|
621 |
where = ELEVATOR_INSERT_BACK; |
8922e16cf
|
622 |
switch (where) { |
28e7d1845
|
623 |
case ELEVATOR_INSERT_REQUEUE: |
8922e16cf
|
624 |
case ELEVATOR_INSERT_FRONT: |
4aff5e233
|
625 |
rq->cmd_flags |= REQ_SOFTBARRIER; |
8922e16cf
|
626 627 628 629 |
list_add(&rq->queuelist, &q->queue_head); break; case ELEVATOR_INSERT_BACK: |
4aff5e233
|
630 |
rq->cmd_flags |= REQ_SOFTBARRIER; |
15853af9f
|
631 |
elv_drain_elevator(q); |
8922e16cf
|
632 633 634 635 636 637 638 639 640 641 642 |
list_add_tail(&rq->queuelist, &q->queue_head); /* * We kick the queue here for the following reasons. * - The elevator might have returned NULL previously * to delay requests and returned them now. As the * queue wasn't empty before this request, ll_rw_blk * won't run the queue on return, resulting in hang. * - Usually, back inserted requests won't be merged * with anything. There's no point in delaying queue * processing. */ |
24ecfbe27
|
643 |
__blk_run_queue(q); |
8922e16cf
|
644 |
break; |
5e84ea3a9
|
645 646 647 648 649 650 651 652 |
case ELEVATOR_INSERT_SORT_MERGE: /* * If we succeed in merging this request with one in the * queue already, we are done - rq has now been freed, * so no need to do anything further. */ if (elv_attempt_insert_merge(q, rq)) break; |
8922e16cf
|
653 |
case ELEVATOR_INSERT_SORT: |
33659ebba
|
654 655 |
BUG_ON(rq->cmd_type != REQ_TYPE_FS && !(rq->cmd_flags & REQ_DISCARD)); |
4aff5e233
|
656 |
rq->cmd_flags |= REQ_SORTED; |
15853af9f
|
657 |
q->nr_sorted++; |
9817064b6
|
658 659 660 661 662 |
if (rq_mergeable(rq)) { elv_rqhash_add(q, rq); if (!q->last_merge) q->last_merge = rq; } |
ca23509fb
|
663 664 665 666 667 668 |
/* * Some ioscheds (cfq) run q->request_fn directly, so * rq cannot be accessed after calling * elevator_add_req_fn. */ q->elevator->ops->elevator_add_req_fn(q, rq); |
8922e16cf
|
669 |
break; |
ae1b15396
|
670 671 672 673 |
case ELEVATOR_INSERT_FLUSH: rq->cmd_flags |= REQ_SOFTBARRIER; blk_insert_flush(rq); break; |
8922e16cf
|
674 675 676 |
default: printk(KERN_ERR "%s: bad insertion point %d ", |
24c03d47d
|
677 |
__func__, where); |
8922e16cf
|
678 679 |
BUG(); } |
1da177e4c
|
680 |
} |
2e662b65f
|
681 |
EXPORT_SYMBOL(__elv_add_request); |
7eaceacca
|
682 |
void elv_add_request(struct request_queue *q, struct request *rq, int where) |
1da177e4c
|
683 684 685 686 |
{ unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); |
7eaceacca
|
687 |
__elv_add_request(q, rq, where); |
1da177e4c
|
688 689 |
spin_unlock_irqrestore(q->queue_lock, flags); } |
2e662b65f
|
690 |
EXPORT_SYMBOL(elv_add_request); |
165125e1e
|
691 |
struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
692 |
{ |
b374d18a4
|
693 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
694 695 696 |
if (e->ops->elevator_latter_req_fn) return e->ops->elevator_latter_req_fn(q, rq); |
1da177e4c
|
697 698 |
return NULL; } |
165125e1e
|
699 |
struct request *elv_former_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
700 |
{ |
b374d18a4
|
701 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
702 703 704 |
if (e->ops->elevator_former_req_fn) return e->ops->elevator_former_req_fn(q, rq); |
1da177e4c
|
705 706 |
return NULL; } |
165125e1e
|
707 |
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) |
1da177e4c
|
708 |
{ |
b374d18a4
|
709 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
710 711 |
if (e->ops->elevator_set_req_fn) |
cb78b285c
|
712 |
return e->ops->elevator_set_req_fn(q, rq, gfp_mask); |
1da177e4c
|
713 |
|
c186794db
|
714 |
rq->elevator_private[0] = NULL; |
1da177e4c
|
715 716 |
return 0; } |
165125e1e
|
717 |
void elv_put_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
718 |
{ |
b374d18a4
|
719 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
720 721 |
if (e->ops->elevator_put_req_fn) |
bb37b94c6
|
722 |
e->ops->elevator_put_req_fn(rq); |
1da177e4c
|
723 |
} |
165125e1e
|
724 |
int elv_may_queue(struct request_queue *q, int rw) |
1da177e4c
|
725 |
{ |
b374d18a4
|
726 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
727 728 |
if (e->ops->elevator_may_queue_fn) |
cb78b285c
|
729 |
return e->ops->elevator_may_queue_fn(q, rw); |
1da177e4c
|
730 731 732 |
return ELV_MQUEUE_MAY; } |
11914a53d
|
733 734 735 |
void elv_abort_queue(struct request_queue *q) { struct request *rq; |
ae1b15396
|
736 |
blk_abort_flushes(q); |
11914a53d
|
737 738 739 |
while (!list_empty(&q->queue_head)) { rq = list_entry_rq(q->queue_head.next); rq->cmd_flags |= REQ_QUIET; |
5f3ea37c7
|
740 |
trace_block_rq_abort(q, rq); |
53c663ce0
|
741 742 743 744 745 |
/* * Mark this request as started so we don't trigger * any debug logic in the end I/O path. */ blk_start_request(rq); |
40cbbb781
|
746 |
__blk_end_request_all(rq, -EIO); |
11914a53d
|
747 748 749 |
} } EXPORT_SYMBOL(elv_abort_queue); |
165125e1e
|
750 |
void elv_completed_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
751 |
{ |
b374d18a4
|
752 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
753 754 755 756 |
/* * request is released from the driver, io must be done */ |
8922e16cf
|
757 |
if (blk_account_rq(rq)) { |
0a7ae2ff0
|
758 |
q->in_flight[rq_is_sync(rq)]--; |
33659ebba
|
759 760 |
if ((rq->cmd_flags & REQ_SORTED) && e->ops->elevator_completed_req_fn) |
1bc691d35
|
761 762 |
e->ops->elevator_completed_req_fn(q, rq); } |
1da177e4c
|
763 |
} |
3d1ab40f4
|
764 765 766 767 |
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) static ssize_t elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
1da177e4c
|
768 |
{ |
3d1ab40f4
|
769 |
struct elv_fs_entry *entry = to_elv(attr); |
b374d18a4
|
770 |
struct elevator_queue *e; |
3d1ab40f4
|
771 772 773 774 |
ssize_t error; if (!entry->show) return -EIO; |
b374d18a4
|
775 |
e = container_of(kobj, struct elevator_queue, kobj); |
3d1ab40f4
|
776 777 778 779 780 |
mutex_lock(&e->sysfs_lock); error = e->ops ? entry->show(e, page) : -ENOENT; mutex_unlock(&e->sysfs_lock); return error; } |
1da177e4c
|
781 |
|
3d1ab40f4
|
782 783 784 785 |
static ssize_t elv_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { |
3d1ab40f4
|
786 |
struct elv_fs_entry *entry = to_elv(attr); |
b374d18a4
|
787 |
struct elevator_queue *e; |
3d1ab40f4
|
788 |
ssize_t error; |
1da177e4c
|
789 |
|
3d1ab40f4
|
790 791 |
if (!entry->store) return -EIO; |
1da177e4c
|
792 |
|
b374d18a4
|
793 |
e = container_of(kobj, struct elevator_queue, kobj); |
3d1ab40f4
|
794 795 796 797 798 |
mutex_lock(&e->sysfs_lock); error = e->ops ? entry->store(e, page, length) : -ENOENT; mutex_unlock(&e->sysfs_lock); return error; } |
52cf25d0a
|
799 |
static const struct sysfs_ops elv_sysfs_ops = { |
3d1ab40f4
|
800 801 802 803 804 805 806 807 808 809 810 |
.show = elv_attr_show, .store = elv_attr_store, }; static struct kobj_type elv_ktype = { .sysfs_ops = &elv_sysfs_ops, .release = elevator_release, }; int elv_register_queue(struct request_queue *q) { |
b374d18a4
|
811 |
struct elevator_queue *e = q->elevator; |
3d1ab40f4
|
812 |
int error; |
b2d6db587
|
813 |
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); |
3d1ab40f4
|
814 |
if (!error) { |
e572ec7e4
|
815 |
struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; |
3d1ab40f4
|
816 |
if (attr) { |
e572ec7e4
|
817 818 |
while (attr->attr.name) { if (sysfs_create_file(&e->kobj, &attr->attr)) |
3d1ab40f4
|
819 |
break; |
e572ec7e4
|
820 |
attr++; |
3d1ab40f4
|
821 822 823 |
} } kobject_uevent(&e->kobj, KOBJ_ADD); |
430c62fb2
|
824 |
e->registered = 1; |
3d1ab40f4
|
825 826 |
} return error; |
1da177e4c
|
827 |
} |
01effb0dc
|
828 |
EXPORT_SYMBOL(elv_register_queue); |
1da177e4c
|
829 |
|
b374d18a4
|
830 |
static void __elv_unregister_queue(struct elevator_queue *e) |
bc1c11697
|
831 832 833 |
{ kobject_uevent(&e->kobj, KOBJ_REMOVE); kobject_del(&e->kobj); |
430c62fb2
|
834 |
e->registered = 0; |
bc1c11697
|
835 |
} |
1da177e4c
|
836 837 |
void elv_unregister_queue(struct request_queue *q) { |
bc1c11697
|
838 839 |
if (q) __elv_unregister_queue(q->elevator); |
1da177e4c
|
840 |
} |
01effb0dc
|
841 |
EXPORT_SYMBOL(elv_unregister_queue); |
1da177e4c
|
842 |
|
2fdd82bd8
|
843 |
void elv_register(struct elevator_type *e) |
1da177e4c
|
844 |
{ |
1ffb96c58
|
845 |
char *def = ""; |
2a12dcd71
|
846 847 |
spin_lock(&elv_list_lock); |
ce5244974
|
848 |
BUG_ON(elevator_find(e->elevator_name)); |
1da177e4c
|
849 |
list_add_tail(&e->list, &elv_list); |
2a12dcd71
|
850 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
851 |
|
5f0039764
|
852 853 854 |
if (!strcmp(e->elevator_name, chosen_elevator) || (!*chosen_elevator && !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) |
1ffb96c58
|
855 |
def = " (default)"; |
4eb166d98
|
856 857 858 |
printk(KERN_INFO "io scheduler %s registered%s ", e->elevator_name, def); |
1da177e4c
|
859 860 861 862 863 |
} EXPORT_SYMBOL_GPL(elv_register); void elv_unregister(struct elevator_type *e) { |
83521d3eb
|
864 865 866 867 868 |
struct task_struct *g, *p; /* * Iterate every thread in the process to remove the io contexts. */ |
e17a9489b
|
869 870 871 872 |
if (e->ops.trim) { read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); |
2d8f61316
|
873 874 |
if (p->io_context) e->ops.trim(p->io_context); |
e17a9489b
|
875 876 877 878 |
task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); } |
83521d3eb
|
879 |
|
2a12dcd71
|
880 |
spin_lock(&elv_list_lock); |
1da177e4c
|
881 |
list_del_init(&e->list); |
2a12dcd71
|
882 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
883 884 885 886 887 888 889 |
} EXPORT_SYMBOL_GPL(elv_unregister); /* * switch to new_e io scheduler. be careful not to introduce deadlocks - * we don't free the old io scheduler, before we have allocated what we * need for the new one. this way we have a chance of going back to the old |
cb98fc8bb
|
890 |
* one, if the new one fails init for some reason. |
1da177e4c
|
891 |
*/ |
165125e1e
|
892 |
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) |
1da177e4c
|
893 |
{ |
b374d18a4
|
894 |
struct elevator_queue *old_elevator, *e; |
bc1c11697
|
895 |
void *data; |
5dd531a03
|
896 |
int err; |
1da177e4c
|
897 |
|
cb98fc8bb
|
898 899 900 |
/* * Allocate new elevator */ |
b5deef901
|
901 |
e = elevator_alloc(q, new_e); |
1da177e4c
|
902 |
if (!e) |
5dd531a03
|
903 |
return -ENOMEM; |
1da177e4c
|
904 |
|
bc1c11697
|
905 906 907 |
data = elevator_init_queue(q, e); if (!data) { kobject_put(&e->kobj); |
5dd531a03
|
908 |
return -ENOMEM; |
bc1c11697
|
909 |
} |
1da177e4c
|
910 |
/* |
cb98fc8bb
|
911 |
* Turn on BYPASS and drain all requests w/ elevator private data |
1da177e4c
|
912 |
*/ |
cb98fc8bb
|
913 |
spin_lock_irq(q->queue_lock); |
f600abe2d
|
914 |
elv_quiesce_start(q); |
cb98fc8bb
|
915 |
|
1da177e4c
|
916 |
/* |
bc1c11697
|
917 |
* Remember old elevator. |
1da177e4c
|
918 |
*/ |
1da177e4c
|
919 920 921 |
old_elevator = q->elevator; /* |
1da177e4c
|
922 923 |
* attach and start new elevator */ |
bc1c11697
|
924 925 926 |
elevator_attach(q, e, data); spin_unlock_irq(q->queue_lock); |
430c62fb2
|
927 928 |
if (old_elevator->registered) { __elv_unregister_queue(old_elevator); |
1da177e4c
|
929 |
|
430c62fb2
|
930 931 932 933 |
err = elv_register_queue(q); if (err) goto fail_register; } |
1da177e4c
|
934 935 |
/* |
cb98fc8bb
|
936 |
* finally exit old elevator and turn off BYPASS. |
1da177e4c
|
937 938 |
*/ elevator_exit(old_elevator); |
75ad23bc0
|
939 |
spin_lock_irq(q->queue_lock); |
f600abe2d
|
940 |
elv_quiesce_end(q); |
75ad23bc0
|
941 |
spin_unlock_irq(q->queue_lock); |
4722dc52a
|
942 |
blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); |
5dd531a03
|
943 |
return 0; |
1da177e4c
|
944 945 946 947 948 949 950 |
fail_register: /* * switch failed, exit the new io scheduler and reattach the old * one again (along with re-adding the sysfs dir) */ elevator_exit(e); |
1da177e4c
|
951 952 |
q->elevator = old_elevator; elv_register_queue(q); |
75ad23bc0
|
953 954 955 956 |
spin_lock_irq(q->queue_lock); queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); spin_unlock_irq(q->queue_lock); |
5dd531a03
|
957 |
return err; |
1da177e4c
|
958 |
} |
5dd531a03
|
959 960 961 962 |
/* * Switch this queue to the given IO scheduler. */ int elevator_change(struct request_queue *q, const char *name) |
1da177e4c
|
963 964 965 |
{ char elevator_name[ELV_NAME_MAX]; struct elevator_type *e; |
cd43e26f0
|
966 |
if (!q->elevator) |
5dd531a03
|
967 |
return -ENXIO; |
cd43e26f0
|
968 |
|
ee2e992cc
|
969 |
strlcpy(elevator_name, name, sizeof(elevator_name)); |
8c2795985
|
970 |
e = elevator_get(strstrip(elevator_name)); |
1da177e4c
|
971 972 973 974 975 |
if (!e) { printk(KERN_ERR "elevator: type %s not found ", elevator_name); return -EINVAL; } |
2ca7d93bb
|
976 977 |
if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { elevator_put(e); |
5dd531a03
|
978 |
return 0; |
2ca7d93bb
|
979 |
} |
1da177e4c
|
980 |
|
5dd531a03
|
981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 |
return elevator_switch(q, e); } EXPORT_SYMBOL(elevator_change); ssize_t elv_iosched_store(struct request_queue *q, const char *name, size_t count) { int ret; if (!q->elevator) return count; ret = elevator_change(q, name); if (!ret) return count; printk(KERN_ERR "elevator: switch to %s failed ", name); return ret; |
1da177e4c
|
1000 |
} |
165125e1e
|
1001 |
ssize_t elv_iosched_show(struct request_queue *q, char *name) |
1da177e4c
|
1002 |
{ |
b374d18a4
|
1003 |
struct elevator_queue *e = q->elevator; |
cd43e26f0
|
1004 |
struct elevator_type *elv; |
70cee26e0
|
1005 |
struct elevator_type *__e; |
1da177e4c
|
1006 |
int len = 0; |
e36f724b4
|
1007 |
if (!q->elevator || !blk_queue_stackable(q)) |
cd43e26f0
|
1008 1009 1010 1011 |
return sprintf(name, "none "); elv = e->elevator_type; |
2a12dcd71
|
1012 |
spin_lock(&elv_list_lock); |
70cee26e0
|
1013 |
list_for_each_entry(__e, &elv_list, list) { |
1da177e4c
|
1014 1015 1016 1017 1018 |
if (!strcmp(elv->elevator_name, __e->elevator_name)) len += sprintf(name+len, "[%s] ", elv->elevator_name); else len += sprintf(name+len, "%s ", __e->elevator_name); } |
2a12dcd71
|
1019 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
1020 1021 1022 1023 1024 |
len += sprintf(len+name, " "); return len; } |
165125e1e
|
1025 1026 |
struct request *elv_rb_former_request(struct request_queue *q, struct request *rq) |
2e662b65f
|
1027 1028 1029 1030 1031 1032 1033 1034 |
{ struct rb_node *rbprev = rb_prev(&rq->rb_node); if (rbprev) return rb_entry_rq(rbprev); return NULL; } |
2e662b65f
|
1035 |
EXPORT_SYMBOL(elv_rb_former_request); |
165125e1e
|
1036 1037 |
struct request *elv_rb_latter_request(struct request_queue *q, struct request *rq) |
2e662b65f
|
1038 1039 1040 1041 1042 1043 1044 1045 |
{ struct rb_node *rbnext = rb_next(&rq->rb_node); if (rbnext) return rb_entry_rq(rbnext); return NULL; } |
2e662b65f
|
1046 |
EXPORT_SYMBOL(elv_rb_latter_request); |