block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
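/*
 * Note: the hash key is the *end* sector of a request (start + size), so
 * a lookup with a bio's start sector finds requests that the bio could be
 * back-merged into.
 */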
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
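/*
 * Note: a successful elevator_get() takes a reference on the elevator's
 * module via try_module_get(); callers must balance it with elevator_put()
 * once they are done with the type.
 */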
static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
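/*
 * Example: booting with "elevator=deadline" on the kernel command line
 * makes deadline the default io scheduler for all queues.
 */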
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}
EXPORT_SYMBOL(elevator_alloc);
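/*
 * elevator_alloc() is meant to be called from an elevator's
 * elevator_init_fn (e.g. noop's init), which then hangs its private
 * data off the returned queue before installing it in q->elevator.
 */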
static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by config boot param or
	 * config option. Don't try to load modules as we could be running
	 * off async and request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop", false);
		}
	}

	err = e->ops.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);	/* drop the module ref taken above */
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
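/*
 * The final kobject_put() above ends up in elevator_release(), which
 * drops the module reference and frees the elevator_queue.
 */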
static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->cmd_flags &= ~REQ_HASHED;
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->cmd_flags |= REQ_HASHED;
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
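/*
 * Note that the lookup above opportunistically prunes requests that have
 * become non-mergeable instead of leaving them in the hash.
 */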
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
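/*
 * Illustrative use (sketch only - this is roughly how a sector-sorted
 * scheduler such as deadline employs these helpers):
 *
 *	elv_rb_add(&dd->sort_list[data_dir], rq);	// keep rq sector-sorted
 *	rq = elv_rb_find(&dd->sort_list[READ], pos);	// front-merge lookup
 */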
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue instead of appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
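/*
 * Unlike elv_dispatch_sort(), this keeps no sector ordering; it also
 * moves the scheduling boundary (end_sector/boundary_rq) so that later
 * sorted inserts land behind rq.
 */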
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
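/*
 * Callers dispatch on the return value: ELEVATOR_BACK_MERGE appends the
 * bio to *req, ELEVATOR_FRONT_MERGE prepends it, and ELEVATOR_NO_MERGE
 * means a new request must be allocated.
 */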
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
#ifdef CONFIG_PM_RUNTIME
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif
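/*
 * nr_pending counts only non-PM requests, so a queue carrying nothing
 * but PM requests can still runtime-suspend; the first normal request
 * arriving on a suspended/suspending queue triggers an async resume.
 */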
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* fall through */
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);
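/*
 * Summary of the insertion points handled above:
 *   FRONT/REQUEUE - head of the dispatch list, bypassing the scheduler
 *   BACK          - drain the scheduler, then tail of the dispatch list
 *   SORT[_MERGE]  - hand the request to the io scheduler proper
 *   FLUSH         - route through the flush state machine
 */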
void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);
int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = new_e->ops.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}
/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);
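/*
 * elv_iosched_store() below backs the per-queue sysfs knob, e.g.:
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 */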
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
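/*
 * Reading the sysfs file produces something like "noop deadline [cfq]",
 * with the active scheduler bracketed.
 */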
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);