block/cfq-iosched.c
/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

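/*
 * These are the scheduler-wide defaults for the per-device copies kept in
 * struct cfq_data (see the "tunables" block there); the time-based values
 * are expressed in jiffies, i.e. relative to HZ.
 */
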
/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;
	unsigned int slice_dispatch;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	/*
	 * Used to track any pending rt requests so we can pre-empt current
	 * non-RT cfqq in service when this value is non-zero.
	 */
	unsigned int busy_rt_queues;

	int rq_in_driver;
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
#undef CFQ_CFQQ_FNS

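/*
 * The CFQ_CFQQ_FNS() expansions above provide cfq_mark_cfqq_<flag>(),
 * cfq_clear_cfqq_<flag>() and cfq_cfqq_<flag>() helpers for setting,
 * clearing and testing each cfqq state flag.
 */
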
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
					     struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
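/*
 * The slice is base_slice + base_slice/CFQ_SLICE_SCALE * (4 - prio):
 * prio 4 gets exactly the base slice, prio 0 (highest) roughly 180% of
 * it and prio 7 (lowest) roughly 40%.
 */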
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
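/*
 * rb_key approximates the time at which a queue should next be serviced:
 * jiffies plus an offset that grows with the number of busy queues and is
 * smaller for higher-priority queues, plus any unused slice residue. Idle
 * class queues are keyed a fixed CFQ_IDLE_DELAY behind the last entry.
 */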
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector.  Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias, *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active");
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		del_timer(&cfqd->idle_slice_timer);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq) {
		cfqq = cfq_get_next_queue(cfqd);
		if (cfqq)
			cfq_clear_cfqq_coop(cfqq);
	}

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

#define CIC_SEEK_THR	8 * 1024
#define CIC_SEEKY(cic)	((cic)->seek_mean > CIC_SEEK_THR)

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;
	sector_t sdist = cic->seek_mean;

	if (!sample_valid(cic->seek_samples))
		sdist = CIC_SEEK_THR;

	return cfq_dist_from_last(cfqd, rq) <= sdist;
}

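/*
 * A request counts as "close" if it lies within the submitting task's mean
 * seek distance of cfqd->last_position; CIC_SEEK_THR is used instead when
 * there are not yet enough seek samples to trust the mean.
 */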
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}

/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq,
					      int probe)
{
	struct cfq_queue *cfqq;

	/*
	 * A valid cfq_io_context is necessary to compare requests against
	 * the seek_mean of the current cfqq.
	 */
	if (!cfqd->active_cic)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	if (cfq_cfqq_coop(cfqq))
		return NULL;

	if (!probe)
		cfq_mark_cfqq_coop(cfqq);
	return cfqq;
}

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * still requests with the driver, don't idle
	 */
	if (cfqd->rq_in_driver)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		rq = NULL;

	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

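/*
 * Per-slice dispatch budget for async queues: 2 * base_rq * (8 - ioprio),
 * so higher-priority (lower numbered) queues may dispatch more requests
 * before cfq_dispatch_requests() expires their slice.
 */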
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
		goto expire;

	/*
	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
	 * cfqq.
	 */
	if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
		/*
		 * We simulate this as cfqq timed out so that it gets to bank
		 * the remaining of its time slice.
		 */
		cfq_log_cfqq(cfqd, cfqq, "preempt");
		cfq_slice_expired(cfqd, 1);
		goto new_queue;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run.  The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
	if (new_cfqq)
		goto expire;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_context *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->ioc->refcount);
		cfqd->active_cic = cic;
	}
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	unsigned int max_dispatch;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
		return 0;

	max_dispatch = cfqd->cfq_quantum;
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return 0;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1)
			return 0;

		/*
		 * we are the only queue, allow up to 4 times of 'quantum'
		 */
		if (cfqq->dispatched >= 4 * max_dispatch)
			return 0;
	}

	/*
	 * Dispatch a request from this cfqq
	 */
	cfq_dispatch_request(cfqd, cfqq);
	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log(cfqd, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}

/*
 * Must always be called with the rcu_read_lock() held
 */
static void
__call_for_each_cic(struct io_context *ioc,
		    void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	rcu_read_lock();
	__call_for_each_cic(ioc, func);
	rcu_read_unlock();
}

static void cfq_cic_free_rcu(struct rcu_head *head)
{
	struct cfq_io_context *cic;

	cic = container_of(head, struct cfq_io_context, rcu_head);

	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;

	BUG_ON(!cic->dead_key);

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
 * and ->trim() which is called with the task lock held
 */
static void cfq_free_io_context(struct io_context *ioc)
{
	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc.  So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
	__call_for_each_cic(ioc, cic_free_func);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure key == NULL is seen for dead queues
	 */
	smp_wmb();
	cic->dead_key = (unsigned long) cic->key;
	cic->key = NULL;

	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);

		/*
		 * Ensure we get a fresh copy of the ->key to prevent
		 * race between exiting task and queue
		 */
		smp_read_barrier_depends();
		if (cic->key)
			__cfq_exit_single_io_context(cfqd, cic);

		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belongs to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
							cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}

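/*
 * On an ioprio change the shared async cfqq is swapped for one matching the
 * new priority, while the sync cfqq is only marked prio_changed so that it
 * picks up the new priority the next time cfq_init_prio_data() runs for it.
 */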
static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[BLK_RW_ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
						GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[BLK_RW_SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_ioprio);
	ioc->ioprio_changed = 0;
}

static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, int is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	atomic_set(&cfqq->ref, 0);
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;

retry:
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, ioc);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	return cfqq;
}

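/*
 * Async queues are shared: there is one async cfqq per (ioprio_class,
 * ioprio) pair for the whole device rather than one per process.
 * cfq_async_queue_prio() returns the cfq_data slot holding it; the
 * queues themselves are pinned in cfq_get_queue() and released at
 * scheduler exit by cfq_put_async_queues().
 */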
static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		atomic_inc(&cfqq->ref);
		*async_cfqq = cfqq;
	}

	atomic_inc(&cfqq->ref);
	return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
		  struct cfq_io_context *cic)
{
	unsigned long flags;
	WARN_ON(!list_empty(&cic->queue_list));

	spin_lock_irqsave(&ioc->lock, flags);
	BUG_ON(ioc->ioc_data == cic);

	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

static struct cfq_io_context *
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct cfq_io_context *cic;
	unsigned long flags;
	void *k;

	if (unlikely(!ioc))
		return NULL;

	rcu_read_lock();

	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = rcu_dereference(ioc->ioc_data);
	if (cic && cic->key == cfqd) {
		rcu_read_unlock();
		return cic;
	}

	do {
		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
		rcu_read_unlock();
		if (!cic)
			break;
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(cfqd, ioc, cic);
			rcu_read_lock();
			continue;
		}

		spin_lock_irqsave(&ioc->lock, flags);
		rcu_assign_pointer(ioc->ioc_data, cic);
		spin_unlock_irqrestore(&ioc->lock, flags);
		break;
	} while (1);

	return cic;
}

/*
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->key = cfqd;

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->radix_root,
						(unsigned long) cfqd, cic);
		if (!ret)
			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
	}

	if (ret)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
		goto err_free;

out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);
	return cic;
err_free:
	cfq_cic_free(cic);
err:
	put_io_context(ioc);
	return NULL;
}

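/*
 * Think time statistics are kept as fixed-point exponential averages:
 * each update keeps 7/8 of the old value and adds 1/8 of the new sample,
 * scaled by 256 to preserve precision in integer math. ttime_samples
 * therefore converges towards 256 once enough samples have been seen,
 * and ttime_mean approximates the mean think time in jiffies.
 */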
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

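/*
 * Seek distance is tracked the same way as think time: a decayed average
 * with 1/8 weight for new samples and a fixed-point scale of 256.
 * seek_mean is the resulting mean distance between consecutive requests
 * and feeds the CIC_SEEKY() check used when deciding whether idling on
 * this process is worthwhile.
 */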
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (!cic->last_request_pos)
		sdist = 0;
	else if (cic->last_request_pos < blk_rq_pos(rq))
		sdist = blk_rq_pos(rq) - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - blk_rq_pos(rq);

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				del_timer(&cfqd->idle_slice_timer);
				__blk_run_queue(cfqd->queue);
			}
			cfq_mark_cfqq_must_dispatch(cfqq);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority or
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

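/*
 * hw_tag is seeded to 1 in cfq_init_queue() and re-evaluated below every
 * 50 completions once the device has seen enough load: if the observed
 * peak driver depth stays below CFQ_HW_QUEUE_MIN, the device is treated
 * as non-queueing and hw_tag is cleared. The flag matters for idling
 * decisions, see the CIC_SEEKY() check in cfq_update_idle_window().
 */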
/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
		cfqd->rq_in_driver_peak = cfqd->rq_in_driver;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;

	cfqd->hw_tag_samples = 0;
	cfqd->rq_in_driver_peak = 0;
}

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete");

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		/*
		 * If there are no requests waiting in this queue, and
		 * there are other queues ready to issue requests, AND
		 * those other queues are issuing requests within our
		 * mean seek distance, give them a chance to run instead
		 * of idling.
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
			 sync && !rq_noidle(rq))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->ioc);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	}

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	cfq_log(cfqd, "set_request fail");
	return 1;
}

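/*
 * cfq_kick_queue() is the handler behind cfqd->unplug_work (see
 * INIT_WORK() in cfq_init_queue()). It restarts request dispatch from
 * process context, with the queue lock held.
 */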
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);
	kfree(cfqd);
}

static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	atomic_inc(&cfqd->oom_cfqq.ref);

	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->hw_tag = 1;

	return cfqd;
}

static void cfq_slab_kill(void)
{
	/*
	 * Caller already ensured that pending RCU callbacks are completed,
	 * so we should have no busy allocations at this point.
	 */
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

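/*
 * The SHOW_FUNCTION/STORE_FUNCTION macros below generate the sysfs
 * show/store handlers for each tunable listed in cfq_attrs. When __CONV
 * is set, the value is converted between jiffies (internal) and
 * milliseconds (what sysfs exposes) on the way out and in.
 */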
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

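/*
 * iosched_cfq wires the scheduler's entry points into the elevator core.
 * It is registered with elv_register() in cfq_init() below and can then
 * be selected per queue via /sys/block/<dev>/queue/scheduler.
 */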
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");