block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000  Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000  Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

DEFINE_TRACE(block_rq_abort);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

DEFINE_TRACE(block_rq_insert);
DEFINE_TRACE(block_rq_issue);
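/*
 * Illustrative note (not part of the original source): with
 * elv_hash_shift = 6 the table has 1 << 6 = 64 buckets, and
 * ELV_HASH_BLOCK() discards the low three bits of the sector, so keys
 * inside the same 8-sector block hash to the same bucket.  Since
 * rq_hash_key() is the *end* sector of a request, elv_rqhash_find()
 * below can look up back-merge candidates for a bio by the sector it
 * starts at.
 */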
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if (bio_discard(bio) != bio_discard(rq->bio))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
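/*
 * Illustrative example (not part of the original source): for a request
 * spanning sectors [100, 108) and a 4-sector bio, elv_try_merge() below
 * returns ELEVATOR_BACK_MERGE if the bio starts at sector 108 (it
 * extends the tail of the request), and ELEVATOR_FRONT_MERGE if it
 * starts at sector 96 (it ends exactly where the request begins).
 */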
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
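/*
 * Illustrative note: the module auto-load above follows the
 * "<name>-iosched" naming convention, so e.g. requesting "deadline"
 * loads deadline-iosched.ko, with "anticipatory" special-cased to
 * as-iosched.  A scheduler module that doesn't follow this convention
 * would not be found here.
 */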
static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
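/*
 * Illustrative usage: booting with "elevator=deadline" on the kernel
 * command line stores "deadline" in chosen_elevator, which
 * elevator_init() below consults when a queue does not request a
 * scheduler by name.
 */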
static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}
int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);
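/*
 * Editorial summary of the logic above: the scheduler is chosen in
 * falling priority: the caller-supplied name, then the "elevator="
 * boot parameter, then CONFIG_DEFAULT_IOSCHED, and finally "noop" as
 * the last resort, which is expected to always be built in.
 */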
void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
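/*
 * Illustrative usage (a sketch, not from the original source): an io
 * scheduler keeps its sector-sorted list in an rb_root and uses these
 * helpers for front-merge lookups, along the lines of
 *
 *	__rq = elv_rb_find(&sort_list, bio->bi_sector + bio_sectors(bio));
 *
 * i.e. find a queued request that starts exactly where the bio ends.
 * Note that elv_rb_add() returning non-NULL signals an existing request
 * at the same sector, which the caller must resolve before inserting.
 */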
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (blk_discard_rq(rq) != blk_discard_rq(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
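/*
 * Illustrative note: the scan above walks the dispatch list backwards
 * and treats q->end_sector as a wrap point, so requests at or beyond
 * the boundary sort ahead of those below it.  E.g. with boundary 1000,
 * a request at sector 1200 is placed ahead of one at sector 50, which
 * keeps dispatch moving in one direction past the last dispatched
 * request (the classic one-way elevator).
 */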
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
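/*
 * Illustrative summary: merge candidates are probed cheapest first: the
 * one-hit q->last_merge cache, then the O(1) back-merge hash, and only
 * then the scheduler's own elevator_merge_fn (typically an rb-tree
 * lookup), which is what finds front merges.
 */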
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}
void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
static void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		blk_start_queueing(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}
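/*
 * Illustrative summary of the insertion points handled above:
 * INSERT_FRONT and INSERT_BACK bypass the io scheduler and go straight
 * onto the dispatch list (back insertion drains the scheduler first),
 * INSERT_SORT hands the request to the scheduler proper, and
 * INSERT_REQUEUE is a front insertion that respects ordered-flush
 * sequencing when one is in progress.
 */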
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
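/*
 * Illustrative note: elv_add_request() is the locked wrapper; callers
 * that already hold q->queue_lock use __elv_add_request() directly.
 */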
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && rq->data_len) {
			/*
			 * make sure space for the drain appears.  we
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && rq->data_len &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(elv_next_request);
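/*
 * Illustrative usage (a sketch, not from the original source): a block
 * driver's request_fn typically drains the queue with
 *
 *	while ((rq = elv_next_request(q)) != NULL) {
 *		blkdev_dequeue_request(rq);
 *		... issue rq to the hardware, complete it later ...
 *	}
 *
 * where blkdev_dequeue_request() ends up in elv_dequeue_request() below.
 */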
void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and it being freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}
int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}
void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
	}
}
EXPORT_SYMBOL(elv_abort_queue);
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *next = NULL;

		if (!list_empty(&q->queue_head))
			next = list_entry_rq(q->queue_head.next);

		if (!q->in_flight &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			blk_start_queueing(q);
		}
	}
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}
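/*
 * Illustrative note: the kobject registered above appears as
 * /sys/block/<dev>/queue/iosched/, with one file per entry in the
 * scheduler's elevator_attrs table (the exact tunables exposed depend
 * on the scheduler).
 */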
static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}
void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);
984 985 986 987 988 |
} EXPORT_SYMBOL_GPL(elv_register); void elv_unregister(struct elevator_type *e) { |
83521d3eb
|
989 990 991 992 993 |
struct task_struct *g, *p; /* * Iterate every thread in the process to remove the io contexts. */ |
e17a9489b
|
994 995 996 997 |
if (e->ops.trim) { read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); |
2d8f61316
|
998 999 |
if (p->io_context) e->ops.trim(p->io_context); |
e17a9489b
|
1000 1001 1002 1003 |
task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); } |
83521d3eb
|
1004 |
|
2a12dcd71
|
1005 |
spin_lock(&elv_list_lock); |
1da177e4c
|
1006 |
list_del_init(&e->list); |
2a12dcd71
|
1007 |
spin_unlock(&elv_list_lock); |
1da177e4c
|
1008 1009 1010 1011 1012 1013 1014 |
} EXPORT_SYMBOL_GPL(elv_unregister); /* * switch to new_e io scheduler. be careful not to introduce deadlocks - * we don't free the old io scheduler, before we have allocated what we * need for the new one. this way we have a chance of going back to the old |
cb98fc8bb
|
1015 |
* one, if the new one fails init for some reason. |
1da177e4c
|
1016 |
*/ |
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_start_queueing(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	strstrip(elevator_name);

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}
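/*
 * Illustrative usage: this is the store handler behind the scheduler
 * sysfs attribute, so e.g.
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 *
 * ends up here and triggers elevator_switch() (assuming a device named
 * sda; no switch happens if the named scheduler is already active).
 */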
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
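/*
 * Illustrative output: reading the scheduler attribute lists every
 * registered elevator with the active one bracketed, e.g.
 *
 *	noop anticipatory deadline [cfq]
 *
 * (the exact set depends on which schedulers are built in or loaded).
 */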
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);