Blame view
block/elevator.c
24.5 KB
1da177e4c
|
1 |
/* |
1da177e4c
|
2 3 4 5 |
* Block device elevator/IO-scheduler. * * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * |
0fe234795
|
6 |
* 30042000 Jens Axboe <axboe@kernel.dk> : |
1da177e4c
|
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 |
* * Split the elevator a bit so that it is possible to choose a different * one or even write a new "plug in". There are three pieces: * - elevator_fn, inserts a new request in the queue list * - elevator_merge_fn, decides whether a new buffer can be merged with * an existing request * - elevator_dequeue_fn, called when a request is taken off the active list * * 20082000 Dave Jones <davej@suse.de> : * Removed tests for max-bomb-segments, which was breaking elvtune * when run without -bN * * Jens: * - Rework again to work with bio instead of buffer_heads * - loose bi_dev comparisons, partition handling is right now * - completely modularize elevator setup and teardown * */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/bio.h> |
1da177e4c
|
30 31 32 33 |
#include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/compiler.h> |
cb98fc8bb
|
34 |
#include <linux/delay.h> |
2056a782f
|
35 |
#include <linux/blktrace_api.h> |
9817064b6
|
36 |
#include <linux/hash.h> |
0835da67c
|
37 |
#include <linux/uaccess.h> |
1da177e4c
|
38 |
|
55782138e
|
39 |
#include <trace/events/block.h> |
242f9dcb8
|
40 |
#include "blk.h" |
/* protects elv_list; taken by elevator_get/elv_register/elv_unregister */
static DEFINE_SPINLOCK(elv_list_lock);
/* all registered io scheduler types (struct elevator_type) */
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
/* collapse 8 adjacent sectors into one hash block */
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
/* requests are hashed on their end sector, for back-merge lookup */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	/* scheduler hook is optional; default is to allow the merge */
	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	/* finally, let the active io scheduler veto the merge */
	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
/*
 * Decide whether bio can be back- or front-merged with __rq based on
 * sector adjacency.  Returns ELEVATOR_{NO,BACK,FRONT}_MERGE.
 */
int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		/* bio starts right where __rq ends -> append (back merge) */
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		/* bio ends right where __rq starts -> prepend (front merge) */
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
/*
 * Look up a registered io scheduler by name.  Caller must hold
 * elv_list_lock.  Returns NULL if no such scheduler is registered.
 */
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

/* drop the module reference taken by elevator_get() */
static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

/*
 * Find an io scheduler by name and take a reference on its module.
 * If it is not registered yet, try to load "<name>-iosched" and look
 * again.  Returns NULL on failure.
 */
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		/* drop the lock across the (sleeping) module load */
		spin_unlock(&elv_list_lock);

		request_module("%s-iosched", name);

		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	/* pin the owning module; treat a dying module as "not found" */
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
/*
 * Run the scheduler's init hook; returns its private data pointer
 * (NULL on failure).
 */
static void *elevator_init_queue(struct request_queue *q,
				  struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

/* make eq (with its private data) the active elevator of q */
static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

/* scheduler name selected via the "elevator=" boot parameter */
static char chosen_elevator[16];
/*
 * Parse the "elevator=" kernel boot parameter into chosen_elevator.
 */
static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	/* chosen_elevator is static (zeroed), so the copy stays NUL-terminated */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
static struct kobj_type elv_ktype;

/*
 * Allocate and minimally initialize an elevator_queue for scheduler
 * type e on queue q (ops, kobject, sysfs lock, merge hash).  On any
 * failure the reference on e is dropped and NULL is returned.
 */
static struct elevator_queue *elevator_alloc(struct request_queue *q,
					  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	/* back-merge lookup hash, allocated on the queue's home node */
	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	/* kfree(NULL) is a no-op, so this covers both failure points */
	kfree(eq);
	elevator_put(e);
	return NULL;
}

/*
 * kobject release hook: frees the elevator_queue once its last
 * reference is dropped (see elevator_exit()).
 */
static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}
165125e1e
|
225 |
int elevator_init(struct request_queue *q, char *name) |
1da177e4c
|
226 227 228 |
{ struct elevator_type *e = NULL; struct elevator_queue *eq; |
bc1c11697
|
229 |
void *data; |
1da177e4c
|
230 |
|
1abec4fdb
|
231 232 |
if (unlikely(q->elevator)) return 0; |
cb98fc8bb
|
233 234 235 236 |
INIT_LIST_HEAD(&q->queue_head); q->last_merge = NULL; q->end_sector = 0; q->boundary_rq = NULL; |
cb98fc8bb
|
237 |
|
4eb166d98
|
238 239 240 241 242 |
if (name) { e = elevator_get(name); if (!e) return -EINVAL; } |
1da177e4c
|
243 |
|
4eb166d98
|
244 245 246 247 248 249 250 |
if (!e && *chosen_elevator) { e = elevator_get(chosen_elevator); if (!e) printk(KERN_ERR "I/O scheduler %s not found ", chosen_elevator); } |
248d5ca5e
|
251 |
|
4eb166d98
|
252 253 254 255 256 257 258 259 260 |
if (!e) { e = elevator_get(CONFIG_DEFAULT_IOSCHED); if (!e) { printk(KERN_ERR "Default I/O scheduler not found. " \ "Using noop. "); e = elevator_get("noop"); } |
5f0039764
|
261 |
} |
b5deef901
|
262 |
eq = elevator_alloc(q, e); |
3d1ab40f4
|
263 |
if (!eq) |
1da177e4c
|
264 |
return -ENOMEM; |
1da177e4c
|
265 |
|
bc1c11697
|
266 267 |
data = elevator_init_queue(q, eq); if (!data) { |
3d1ab40f4
|
268 |
kobject_put(&eq->kobj); |
bc1c11697
|
269 270 |
return -ENOMEM; } |
1da177e4c
|
271 |
|
bc1c11697
|
272 |
elevator_attach(q, eq, data); |
1abec4fdb
|
273 |
return 0; |
1da177e4c
|
274 |
} |
2e662b65f
|
275 |
EXPORT_SYMBOL(elevator_init); |
/*
 * Tear down elevator @e: run the scheduler's exit hook under the sysfs
 * lock, clear e->ops so concurrent sysfs show/store see -ENOENT, then
 * drop the kobject reference (elevator_release() frees the memory).
 */
void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
/* unconditional removal from the merge hash (re-inits the node) */
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

/* remove rq from the merge hash if it is currently hashed */
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

/* hash rq on its end sector so later bios can find it for a back merge */
static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

/* re-hash rq after its end sector changed (e.g. after a back merge) */
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

/*
 * Find a request whose end sector equals @offset (i.e. a back-merge
 * candidate).  Requests that became unmergeable are lazily dropped
 * from the hash while scanning.
 */
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
/*
 * Insert rq into the tree, keyed by start sector.  If a request with
 * the same start sector already exists, it is returned and rq is NOT
 * inserted; otherwise NULL is returned.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

/* remove rq from the tree; rq must currently be in a tree */
void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

/* find the request starting exactly at @sector, or NULL */
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
8922e16cf
|
384 |
* Insert rq into dispatch queue of q. Queue lock must be held on |
dbe7f76dd
|
385 |
* entry. rq is sort instead into the dispatch queue. To be used by |
2e662b65f
|
386 |
* specific elevators. |
8922e16cf
|
387 |
*/ |
165125e1e
|
388 |
void elv_dispatch_sort(struct request_queue *q, struct request *rq) |
8922e16cf
|
389 390 |
{ sector_t boundary; |
8922e16cf
|
391 |
struct list_head *entry; |
4eb166d98
|
392 |
int stop_flags; |
8922e16cf
|
393 |
|
06b86245c
|
394 395 |
if (q->last_merge == rq) q->last_merge = NULL; |
9817064b6
|
396 397 |
elv_rqhash_del(q, rq); |
15853af9f
|
398 |
q->nr_sorted--; |
06b86245c
|
399 |
|
1b47f531e
|
400 |
boundary = q->end_sector; |
02e031cbc
|
401 |
stop_flags = REQ_SOFTBARRIER | REQ_STARTED; |
8922e16cf
|
402 403 |
list_for_each_prev(entry, &q->queue_head) { struct request *pos = list_entry_rq(entry); |
33659ebba
|
404 405 |
if ((rq->cmd_flags & REQ_DISCARD) != (pos->cmd_flags & REQ_DISCARD)) |
e17fc0a1c
|
406 |
break; |
783660b2f
|
407 408 |
if (rq_data_dir(rq) != rq_data_dir(pos)) break; |
4eb166d98
|
409 |
if (pos->cmd_flags & stop_flags) |
8922e16cf
|
410 |
break; |
83096ebf1
|
411 412 |
if (blk_rq_pos(rq) >= boundary) { if (blk_rq_pos(pos) < boundary) |
8922e16cf
|
413 414 |
continue; } else { |
83096ebf1
|
415 |
if (blk_rq_pos(pos) >= boundary) |
8922e16cf
|
416 417 |
break; } |
83096ebf1
|
418 |
if (blk_rq_pos(rq) >= blk_rq_pos(pos)) |
8922e16cf
|
419 420 421 422 423 |
break; } list_add(&rq->queuelist, entry); } |
2e662b65f
|
424 |
EXPORT_SYMBOL(elv_dispatch_sort); |
9817064b6
|
425 |
/* |
2e662b65f
|
426 427 428 |
* Insert rq into dispatch queue of q. Queue lock must be held on * entry. rq is added to the back of the dispatch queue. To be used by * specific elevators. |
9817064b6
|
429 430 431 432 433 434 435 436 437 438 439 440 441 442 |
*/ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) { if (q->last_merge == rq) q->last_merge = NULL; elv_rqhash_del(q, rq); q->nr_sorted--; q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; list_add_tail(&rq->queuelist, &q->queue_head); } |
2e662b65f
|
443 |
EXPORT_SYMBOL(elv_dispatch_add_tail); |
/*
 * Try to find a request in @q that @bio can be merged into.  On
 * success *req points at the merge candidate and the return value is
 * the merge type (ELEVATOR_BACK/FRONT_MERGE); otherwise
 * ELEVATOR_NO_MERGE is returned.
 */
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	/* finally, let the io scheduler look (e.g. front merges) */
	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}
/*
 * A bio was merged into @rq (merge type in @type).  Notify the io
 * scheduler and fix up bookkeeping: a back merge moved rq's end
 * sector, so it must be repositioned in the merge hash.
 */
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	/* rq just grew - remember it as the one-hit merge cache */
	q->last_merge = rq;
}
/*
 * @next has been merged into @rq and is going away.  Tell the io
 * scheduler (only if @next was under its control, i.e. REQ_SORTED)
 * and drop @next from the elevator's hash/accounting.
 */
void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	/* rq's end sector changed, so rehash it */
	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}
812d40264
|
546 547 548 549 550 551 552 553 |
void elv_bio_merged(struct request_queue *q, struct request *rq, struct bio *bio) { struct elevator_queue *e = q->elevator; if (e->ops->elevator_bio_merged_fn) e->ops->elevator_bio_merged_fn(q, rq, bio); } |
/*
 * Put a previously dispatched request back on the queue (front
 * insertion), undoing the dispatch-time accounting first.
 */
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		/* sorted requests were "activated" - deactivate them again */
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	/* it will be (re)started again when re-dispatched */
	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
26308eab6
|
570 |
void elv_drain_elevator(struct request_queue *q) |
15853af9f
|
571 572 573 574 575 576 577 578 579 580 581 582 583 |
{ static int printed; while (q->elevator->ops->elevator_dispatch_fn(q, 1)) ; if (q->nr_sorted == 0) return; if (printed++ < 10) { printk(KERN_ERR "%s: forced dispatching is broken " "(nr_sorted=%u), please report this ", q->elevator->elevator_type->elevator_name, q->nr_sorted); } } |
/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	/* new requests bypass the elevator while this flag is set */
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		/* drop the lock while sleeping, then drain again */
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}
/* re-enable the elevator after elv_quiesce_start() */
void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}
b710a4805
|
609 |
void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
1da177e4c
|
610 |
{ |
5f3ea37c7
|
611 |
trace_block_rq_insert(q, rq); |
2056a782f
|
612 |
|
1da177e4c
|
613 |
rq->q = q; |
b710a4805
|
614 615 616 617 618 619 620 621 |
if (rq->cmd_flags & REQ_SOFTBARRIER) { /* barriers are scheduling boundary, update end_sector */ if (rq->cmd_type == REQ_TYPE_FS || (rq->cmd_flags & REQ_DISCARD)) { q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; } } else if (!(rq->cmd_flags & REQ_ELVPRIV) && |
3aa72873f
|
622 623 |
(where == ELEVATOR_INSERT_SORT || where == ELEVATOR_INSERT_SORT_MERGE)) |
b710a4805
|
624 |
where = ELEVATOR_INSERT_BACK; |
8922e16cf
|
625 |
switch (where) { |
28e7d1845
|
626 |
case ELEVATOR_INSERT_REQUEUE: |
8922e16cf
|
627 |
case ELEVATOR_INSERT_FRONT: |
4aff5e233
|
628 |
rq->cmd_flags |= REQ_SOFTBARRIER; |
8922e16cf
|
629 630 631 632 |
list_add(&rq->queuelist, &q->queue_head); break; case ELEVATOR_INSERT_BACK: |
4aff5e233
|
633 |
rq->cmd_flags |= REQ_SOFTBARRIER; |
15853af9f
|
634 |
elv_drain_elevator(q); |
8922e16cf
|
635 636 637 638 639 640 641 642 643 644 645 |
list_add_tail(&rq->queuelist, &q->queue_head); /* * We kick the queue here for the following reasons. * - The elevator might have returned NULL previously * to delay requests and returned them now. As the * queue wasn't empty before this request, ll_rw_blk * won't run the queue on return, resulting in hang. * - Usually, back inserted requests won't be merged * with anything. There's no point in delaying queue * processing. */ |
24ecfbe27
|
646 |
__blk_run_queue(q); |
8922e16cf
|
647 |
break; |
5e84ea3a9
|
648 649 650 651 652 653 654 655 |
case ELEVATOR_INSERT_SORT_MERGE: /* * If we succeed in merging this request with one in the * queue already, we are done - rq has now been freed, * so no need to do anything further. */ if (elv_attempt_insert_merge(q, rq)) break; |
8922e16cf
|
656 |
case ELEVATOR_INSERT_SORT: |
33659ebba
|
657 658 |
BUG_ON(rq->cmd_type != REQ_TYPE_FS && !(rq->cmd_flags & REQ_DISCARD)); |
4aff5e233
|
659 |
rq->cmd_flags |= REQ_SORTED; |
15853af9f
|
660 |
q->nr_sorted++; |
9817064b6
|
661 662 663 664 665 |
if (rq_mergeable(rq)) { elv_rqhash_add(q, rq); if (!q->last_merge) q->last_merge = rq; } |
ca23509fb
|
666 667 668 669 670 671 |
/* * Some ioscheds (cfq) run q->request_fn directly, so * rq cannot be accessed after calling * elevator_add_req_fn. */ q->elevator->ops->elevator_add_req_fn(q, rq); |
8922e16cf
|
672 |
break; |
ae1b15396
|
673 674 675 676 |
case ELEVATOR_INSERT_FLUSH: rq->cmd_flags |= REQ_SOFTBARRIER; blk_insert_flush(rq); break; |
8922e16cf
|
677 678 679 |
default: printk(KERN_ERR "%s: bad insertion point %d ", |
24c03d47d
|
680 |
__func__, where); |
8922e16cf
|
681 682 |
BUG(); } |
1da177e4c
|
683 |
} |
2e662b65f
|
684 |
EXPORT_SYMBOL(__elv_add_request); |
/*
 * Locking wrapper around __elv_add_request(): takes the queue lock
 * with interrupts disabled for the duration of the insertion.
 */
void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
165125e1e
|
694 |
struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
695 |
{ |
b374d18a4
|
696 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
697 698 699 |
if (e->ops->elevator_latter_req_fn) return e->ops->elevator_latter_req_fn(q, rq); |
1da177e4c
|
700 701 |
return NULL; } |
165125e1e
|
702 |
struct request *elv_former_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
703 |
{ |
b374d18a4
|
704 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
705 706 707 |
if (e->ops->elevator_former_req_fn) return e->ops->elevator_former_req_fn(q, rq); |
1da177e4c
|
708 709 |
return NULL; } |
/*
 * Let the io scheduler attach its per-request state to @rq.  Without a
 * hook, clear the first private slot and report success.
 */
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private[0] = NULL;
	return 0;
}
165125e1e
|
720 |
void elv_put_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
721 |
{ |
b374d18a4
|
722 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
723 724 |
if (e->ops->elevator_put_req_fn) |
bb37b94c6
|
725 |
e->ops->elevator_put_req_fn(rq); |
1da177e4c
|
726 |
} |
165125e1e
|
727 |
int elv_may_queue(struct request_queue *q, int rw) |
1da177e4c
|
728 |
{ |
b374d18a4
|
729 |
struct elevator_queue *e = q->elevator; |
1da177e4c
|
730 731 |
if (e->ops->elevator_may_queue_fn) |
cb78b285c
|
732 |
return e->ops->elevator_may_queue_fn(q, rw); |
1da177e4c
|
733 734 735 |
return ELV_MQUEUE_MAY; } |
/*
 * Abort every request sitting on the dispatch queue, completing each
 * with -EIO.  Pending flush requests are aborted first.
 */
void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		/* suppress error messages for the forced -EIO completion */
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);
/*
 * A request has been completed by the driver: drop the in-flight
 * accounting and, if the request went through the io scheduler
 * (REQ_SORTED), run the scheduler's completion hook.
 */
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

/*
 * sysfs show dispatcher for elevator attributes.  Serialized against
 * elevator_exit() via sysfs_lock; a cleared e->ops means the elevator
 * is gone and we return -ENOENT.
 */
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}
/*
 * sysfs store dispatcher for elevator attributes; same locking and
 * torn-down-elevator handling as elv_attr_show().
 */
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}
static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

/*
 * Expose the active elevator under <queue>/iosched in sysfs and create
 * one file per attribute the scheduler declares.  Attribute creation
 * stops at the first failure but registration still succeeds.
 */
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);
/* remove the elevator's kobject from sysfs and mark it unregistered */
static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
	e->registered = 0;
}

/* sysfs teardown counterpart of elv_register_queue() */
void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}
EXPORT_SYMBOL(elv_unregister_queue);
1da177e4c
|
845 |
|
2fdd82bd8
|
846 |
void elv_register(struct elevator_type *e) |
1da177e4c
|
847 |
{ |
1ffb96c58
|
848 |
char *def = ""; |
2a12dcd71
|
849 850 |
spin_lock(&elv_list_lock); |
ce5244974
|
851 |
BUG_ON(elevator_find(e->elevator_name)); |
1da177e4c
|
852 |
list_add_tail(&e->list, &elv_list); |
2a12dcd71
|
853 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
854 |
|
5f0039764
|
855 856 857 |
if (!strcmp(e->elevator_name, chosen_elevator) || (!*chosen_elevator && !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) |
1ffb96c58
|
858 |
def = " (default)"; |
4eb166d98
|
859 860 861 |
printk(KERN_INFO "io scheduler %s registered%s ", e->elevator_name, def); |
1da177e4c
|
862 863 864 865 866 |
} EXPORT_SYMBOL_GPL(elv_register); void elv_unregister(struct elevator_type *e) { |
83521d3eb
|
867 868 869 870 871 |
struct task_struct *g, *p; /* * Iterate every thread in the process to remove the io contexts. */ |
e17a9489b
|
872 873 874 875 |
if (e->ops.trim) { read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); |
2d8f61316
|
876 877 |
if (p->io_context) e->ops.trim(p->io_context); |
e17a9489b
|
878 879 880 881 |
task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); } |
83521d3eb
|
882 |
|
2a12dcd71
|
883 |
spin_lock(&elv_list_lock); |
1da177e4c
|
884 |
list_del_init(&e->list); |
2a12dcd71
|
885 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
886 887 888 889 890 891 892 |
} EXPORT_SYMBOL_GPL(elv_unregister); /* * switch to new_e io scheduler. be careful not to introduce deadlocks - * we don't free the old io scheduler, before we have allocated what we * need for the new one. this way we have a chance of going back to the old |
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;
	int err;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return -ENOMEM;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	/* swap the sysfs entries only if the old elevator had one */
	if (old_elevator->registered) {
		__elv_unregister_queue(old_elevator);

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 0;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return err;
}
5dd531a03
|
962 963 964 965 |
/* * Switch this queue to the given IO scheduler. */ int elevator_change(struct request_queue *q, const char *name) |
1da177e4c
|
966 967 968 |
{ char elevator_name[ELV_NAME_MAX]; struct elevator_type *e; |
cd43e26f0
|
969 |
if (!q->elevator) |
5dd531a03
|
970 |
return -ENXIO; |
cd43e26f0
|
971 |
|
ee2e992cc
|
972 |
strlcpy(elevator_name, name, sizeof(elevator_name)); |
8c2795985
|
973 |
e = elevator_get(strstrip(elevator_name)); |
1da177e4c
|
974 975 976 977 978 |
if (!e) { printk(KERN_ERR "elevator: type %s not found ", elevator_name); return -EINVAL; } |
2ca7d93bb
|
979 980 |
if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { elevator_put(e); |
5dd531a03
|
981 |
return 0; |
2ca7d93bb
|
982 |
} |
1da177e4c
|
983 |
|
5dd531a03
|
984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 |
return elevator_switch(q, e); } EXPORT_SYMBOL(elevator_change); ssize_t elv_iosched_store(struct request_queue *q, const char *name, size_t count) { int ret; if (!q->elevator) return count; ret = elevator_change(q, name); if (!ret) return count; printk(KERN_ERR "elevator: switch to %s failed ", name); return ret; |
1da177e4c
|
1003 |
} |
165125e1e
|
1004 |
ssize_t elv_iosched_show(struct request_queue *q, char *name) |
1da177e4c
|
1005 |
{ |
b374d18a4
|
1006 |
struct elevator_queue *e = q->elevator; |
cd43e26f0
|
1007 |
struct elevator_type *elv; |
70cee26e0
|
1008 |
struct elevator_type *__e; |
1da177e4c
|
1009 |
int len = 0; |
e36f724b4
|
1010 |
if (!q->elevator || !blk_queue_stackable(q)) |
cd43e26f0
|
1011 1012 1013 1014 |
return sprintf(name, "none "); elv = e->elevator_type; |
2a12dcd71
|
1015 |
spin_lock(&elv_list_lock); |
70cee26e0
|
1016 |
list_for_each_entry(__e, &elv_list, list) { |
1da177e4c
|
1017 1018 1019 1020 1021 |
if (!strcmp(elv->elevator_name, __e->elevator_name)) len += sprintf(name+len, "[%s] ", elv->elevator_name); else len += sprintf(name+len, "%s ", __e->elevator_name); } |
2a12dcd71
|
1022 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
1023 1024 1025 1026 1027 |
len += sprintf(len+name, " "); return len; } |
165125e1e
|
1028 1029 |
struct request *elv_rb_former_request(struct request_queue *q, struct request *rq) |
2e662b65f
|
1030 1031 1032 1033 1034 1035 1036 1037 |
{ struct rb_node *rbprev = rb_prev(&rq->rb_node); if (rbprev) return rb_entry_rq(rbprev); return NULL; } |
2e662b65f
|
1038 |
EXPORT_SYMBOL(elv_rb_former_request); |
165125e1e
|
1039 1040 |
struct request *elv_rb_latter_request(struct request_queue *q, struct request *rq) |
2e662b65f
|
1041 1042 1043 1044 1045 1046 1047 1048 |
{ struct rb_node *rbnext = rb_next(&rq->rb_node); if (rbnext) return rb_entry_rq(rbnext); return NULL; } |
2e662b65f
|
1049 |
EXPORT_SYMBOL(elv_rb_latter_request); |