block/cfq-iosched.c

/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY          (HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT              (2)

#define CFQ_SLICE_SCALE         (5)

#define RQ_CIC(rq) \
        ((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC                   (0)
#define SYNC                    (1)

#define sample_valid(samples)   ((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
};
#define CFQ_RB_ROOT     (struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
        struct request_queue *queue;

        /*
         * rr list of queues with requests and the count of them
         */
        struct cfq_rb_root service_tree;
        unsigned int busy_queues;

        int rq_in_driver;
        int sync_flight;
        int hw_tag;

        /*
         * idle window management
         */
        struct timer_list idle_slice_timer;
        struct work_struct unplug_work;

        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;

        /*
         * async queue for each priority case
         */
        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
        struct cfq_queue *async_idle_cfqq;

        sector_t last_position;
        unsigned long last_end_request;

        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;

        struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
        /* reference count */
        atomic_t ref;
        /* various state flags, see below */
        unsigned int flags;
        /* parent cfq_data */
        struct cfq_data *cfqd;
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
        unsigned long rb_key;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
        struct request *next_rq;
        /* requests queued in sort_list */
        int queued[2];
        /* currently allocated requests */
        int allocated[2];
        /* fifo list of requests in sort_list */
        struct list_head fifo;

        unsigned long slice_end;
        long slice_resid;

        /* pending metadata requests */
        int meta_pending;
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;

        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class, org_ioprio_class;

        pid_t pid;
};

enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
        CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
        CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
        CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
};

#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
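
/*
 * Each CFQ_CFQQ_FNS(name) invocation above generates three helpers: e.g.
 * CFQ_CFQQ_FNS(on_rr) expands to cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */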

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...) \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
                                       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                                             struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
                                            int is_sync)
{
        return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
                                struct cfq_queue *cfqq, int is_sync)
{
        cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC if it is either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
        if (bio_data_dir(bio) == READ || bio_sync(bio))
                return 1;

        return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
                kblockd_schedule_work(&cfqd->unplug_work);
        }
}

static int cfq_queue_empty(struct request_queue *q)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;

        return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int
cfq_prio_slice(struct cfq_data *cfqd, int sync, unsigned short prio)
{
        const int base_slice = cfqd->cfq_slice[sync];

        WARN_ON(prio >= IOPRIO_BE_NR);

        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
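
/*
 * Worked example: with HZ=1000 the default sync base slice is
 * cfq_slice_sync = HZ/10 = 100 jiffies. CFQ_SLICE_SCALE is 5, so each
 * priority step is worth 100/5 = 20 jiffies around the midpoint prio 4:
 * prio 0 gets 100 + 20 * 4 = 180 jiffies, prio 4 gets exactly 100, and
 * prio 7 gets 100 + 20 * (4 - 7) = 40 jiffies.
 */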

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
        if (cfq_cfqq_slice_new(cfqq))
                return 0;
        if (time_before(jiffies, cfqq->slice_end))
                return 0;

        return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
        sector_t last, s1, s2, d1 = 0, d2 = 0;
        unsigned long back_max;
#define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
        unsigned wrap = 0; /* bit mask: requests behind the disk head? */

        if (rq1 == NULL || rq1 == rq2)
                return rq2;
        if (rq2 == NULL)
                return rq1;

        if (rq_is_sync(rq1) && !rq_is_sync(rq2))
                return rq1;
        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                return rq2;
        if (rq_is_meta(rq1) && !rq_is_meta(rq2))
                return rq1;
        else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
                return rq2;

        s1 = rq1->sector;
        s2 = rq2->sector;

        last = cfqd->last_position;

        /*
         * by definition, 1KiB is 2 sectors
         */
        back_max = cfqd->cfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ1_WRAP;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ2_WRAP;

        /* Found required data */

        /*
         * By doing switch() on the bit mask "wrap" we avoid having to
         * check two variables for all permutations: --> faster!
         */
        switch (wrap) {
        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
                if (d1 < d2)
                        return rq1;
                else if (d2 < d1)
                        return rq2;
                else {
                        if (s1 >= s2)
                                return rq1;
                        else
                                return rq2;
                }

        case CFQ_RQ2_WRAP:
                return rq1;
        case CFQ_RQ1_WRAP:
                return rq2;
        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
        default:
                /*
                 * Since both rqs are wrapped,
                 * start with the one that's further behind head
                 * (--> only *one* back seek required),
                 * since back seek takes more time than forward.
                 */
                if (s1 <= s2)
                        return rq1;
                else
                        return rq2;
        }
}
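
/*
 * Worked example of the distance logic above: with the head at
 * last = 100000 and the default cfq_back_penalty = 2, a request at
 * sector 100100 is 100 sectors ahead (d = 100), while one at sector
 * 99900 is 100 sectors behind and gets charged d = 100 * 2 = 200, so
 * the forward request wins. A request more than back_max sectors behind
 * the head (16 MiB, i.e. 32768 sectors, by default) never competes on
 * distance at all; it is marked as wrapped.
 */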

/*
 * The below is the leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry(root->left, struct cfq_queue, rb_node);

        return NULL;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
        if (root->left == n)
                root->left = NULL;

        rb_erase(n, &root->rb);
        RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 struct request *last)
{
        struct rb_node *rbnext = rb_next(&last->rb_node);
        struct rb_node *rbprev = rb_prev(&last->rb_node);
        struct request *next = NULL, *prev = NULL;

        BUG_ON(RB_EMPTY_NODE(&last->rb_node));

        if (rbprev)
                prev = rb_entry_rq(rbprev);

        if (rbnext)
                next = rb_entry_rq(rbnext);
        else {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext && rbnext != &last->rb_node)
                        next = rb_entry_rq(rbnext);
        }

        return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
                                      struct cfq_queue *cfqq)
{
        /*
         * just an approximation, should be ok.
         */
        return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
                cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
                                 struct cfq_queue *cfqq, int add_front)
{
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
        unsigned long rb_key;
        int left;

        if (cfq_class_idle(cfqq)) {
                rb_key = CFQ_IDLE_DELAY;
                parent = rb_last(&cfqd->service_tree.rb);
                if (parent && parent != &cfqq->rb_node) {
                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
                        rb_key += __cfqq->rb_key;
                } else
                        rb_key += jiffies;
        } else if (!add_front) {
                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
                rb_key += cfqq->slice_resid;
                cfqq->slice_resid = 0;
        } else
                rb_key = 0;

        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                /*
                 * same position, nothing more to do
                 */
                if (rb_key == cfqq->rb_key)
                        return;

                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
        }

        left = 1;
        parent = NULL;
        p = &cfqd->service_tree.rb.rb_node;
        while (*p) {
                struct rb_node **n;

                parent = *p;
                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);

                /*
                 * sort RT queues first, we always want to give
                 * preference to them. IDLE queues go to the back.
                 * after that, sort on the next service time.
                 */
                if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
                        n = &(*p)->rb_left;
                else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
                        n = &(*p)->rb_right;
                else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
                        n = &(*p)->rb_left;
                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
                        n = &(*p)->rb_right;
                else if (rb_key < __cfqq->rb_key)
                        n = &(*p)->rb_left;
                else
                        n = &(*p)->rb_right;

                if (n == &(*p)->rb_right)
                        left = 0;

                p = n;
        }

        if (left)
                cfqd->service_tree.left = &cfqq->rb_node;

        cfqq->rb_key = rb_key;
        rb_link_node(&cfqq->rb_node, parent, p);
        rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
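
/*
 * Summary of the resulting key scheme: a queue pushed to the front
 * (preemption) gets rb_key = 0; a normal queue gets jiffies plus a
 * priority-scaled offset plus any leftover slice residue; an idle-class
 * queue is keyed CFQ_IDLE_DELAY past the current last entry, so it only
 * runs once everything else has been served. Within the tree, RT beats
 * BE and BE beats IDLE regardless of key.
 */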

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        /*
         * Resorting requires the cfqq to be on the RR list already.
         */
        if (cfq_cfqq_on_rr(cfqq))
                cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;

        cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
        BUG_ON(!cfq_cfqq_on_rr(cfqq));
        cfq_clear_cfqq_on_rr(cfqq);

        if (!RB_EMPTY_NODE(&cfqq->rb_node))
                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        const int sync = rq_is_sync(rq);

        BUG_ON(!cfqq->queued[sync]);
        cfqq->queued[sync]--;

        elv_rb_del(&cfqq->sort_list, rq);

        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
                cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *__alias;

        cfqq->queued[rq_is_sync(rq)]++;

        /*
         * looks a little odd, but the first insert might return an alias.
         * if that happens, put the alias on the dispatch list
         */
        while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
                cfq_dispatch_insert(cfqd->queue, __alias);

        if (!cfq_cfqq_on_rr(cfqq))
                cfq_add_cfqq_rr(cfqd, cfqq);

        /*
         * check if this request is a better next-serve candidate
         */
        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
        BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
        elv_rb_del(&cfqq->sort_list, rq);
        cfqq->queued[rq_is_sync(rq)]--;
        cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;

        cic = cfq_cic_lookup(cfqd, tsk->io_context);
        if (!cic)
                return NULL;

        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
        if (cfqq) {
                sector_t sector = bio->bi_sector + bio_sectors(bio);

                return elv_rb_find(&cfqq->sort_list, sector);
        }

        return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;

        cfqd->rq_in_driver++;
        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
                                                cfqd->rq_in_driver);

        /*
         * If the depth is larger than 1, it really could be queueing. But
         * let's make the mark a little higher - idling could still be good
         * for low queueing, and a low queueing number could also just
         * indicate a SCSI mid layer like behaviour where limit+1 is often
         * seen.
         */
        if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
                cfqd->hw_tag = 1;

        cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;

        WARN_ON(!cfqd->rq_in_driver);
        cfqd->rq_in_driver--;
        cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
                                                cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        if (cfqq->next_rq == rq)
                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

        list_del_init(&rq->queuelist);
        cfq_del_rq_rb(rq);

        if (rq_is_meta(rq)) {
                WARN_ON(!cfqq->meta_pending);
                cfqq->meta_pending--;
        }
}

static int cfq_merge(struct request_queue *q, struct request **req,
                     struct bio *bio)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct request *__rq;

        __rq = cfq_find_rq_fmerge(cfqd, bio);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_FRONT_MERGE;
        }

        return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
                               int type)
{
        if (type == ELEVATOR_FRONT_MERGE) {
                struct cfq_queue *cfqq = RQ_CFQQ(req);

                cfq_reposition_rq_rb(cfqq, req);
        }
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
                    struct request *next)
{
        /*
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
            time_before(next->start_time, rq->start_time))
                list_move(&rq->queuelist, &next->queuelist);

        cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
                           struct bio *bio)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;

        /*
         * Disallow merge of a sync bio into an async request.
         */
        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
                return 0;

        /*
         * Lookup the cfqq that this bio will be queued with. Allow
         * merge only if rq is queued there.
         */
        cic = cfq_cic_lookup(cfqd, current->io_context);
        if (!cic)
                return 0;

        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
        if (cfqq == RQ_CFQQ(rq))
                return 1;

        return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
                                   struct cfq_queue *cfqq)
{
        if (cfqq) {
                cfq_log_cfqq(cfqd, cfqq, "set_active");
                cfqq->slice_end = 0;
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
                cfq_mark_cfqq_slice_new(cfqq);
                cfq_clear_cfqq_queue_new(cfqq);
        }

        cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select a new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                    int timed_out)
{
        cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

        if (cfq_cfqq_wait_request(cfqq))
                del_timer(&cfqd->idle_slice_timer);

        cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_wait_request(cfqq);

        /*
         * store what was left of this slice, if the queue idled/timed out
         */
        if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
                cfqq->slice_resid = cfqq->slice_end - jiffies;
                cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
        }

        cfq_resort_rr_list(cfqd, cfqq);

        if (cfqq == cfqd->active_queue)
                cfqd->active_queue = NULL;

        if (cfqd->active_cic) {
                put_io_context(cfqd->active_cic->ioc);
                cfqd->active_cic = NULL;
        }
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
        struct cfq_queue *cfqq = cfqd->active_queue;

        if (cfqq)
                __cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
        if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
                return NULL;

        return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;

        cfqq = cfq_get_next_queue(cfqd);
        __cfq_set_active_queue(cfqd, cfqq);
        return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                                          struct request *rq)
{
        if (rq->sector >= cfqd->last_position)
                return rq->sector - cfqd->last_position;
        else
                return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
        struct cfq_io_context *cic = cfqd->active_cic;

        if (!sample_valid(cic->seek_samples))
                return 0;

        return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
                                struct cfq_queue *cfqq)
{
        /*
         * We should notice if some of the queues are cooperating, eg
         * working closely on the same area of the disk. In that case,
         * we can group them together and don't waste time idling.
         */
        return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq = cfqd->active_queue;
        struct cfq_io_context *cic;
        unsigned long sl;

        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
        WARN_ON(cfq_cfqq_slice_new(cfqq));

        /*
         * idle is disabled, either manually or by past process history
         */
        if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
                return;

        /*
         * still requests with the driver, don't idle
         */
        if (cfqd->rq_in_driver)
                return;

        /*
         * task has exited, don't wait
         */
        cic = cfqd->active_cic;
        if (!cic || !atomic_read(&cic->ioc->nr_tasks))
                return;

        /*
         * See if this prio level has a good candidate
         */
        if (cfq_close_cooperator(cfqd, cfqq) &&
            (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
                return;

        cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);

        /*
         * we don't want to idle for seeks, but we do want to allow
         * fair distribution of slice time for a process doing back-to-back
         * seeks. so allow a little bit of time for it to submit a new rq
         */
        sl = cfqd->cfq_slice_idle;
        if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
                sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
        cfq_log(cfqd, "arm_idle: %lu", sl);
}
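
/*
 * Idle window arithmetic: cfq_slice_idle defaults to HZ/125, i.e. 8 ms
 * (8 jiffies at HZ=1000), so a sync queue that just went empty is given
 * up to 8 ms to issue its next request before the slice is given away.
 * For a process classified as seeky (CIC_SEEKY), the wait is clamped to
 * msecs_to_jiffies(CFQ_MIN_TT) = 2 ms, since idling buys little there.
 */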

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

        cfq_remove_request(rq);
        cfqq->dispatched++;
        elv_dispatch_sort(q, rq);

        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *rq;
        int fifo;

        if (cfq_cfqq_fifo_expire(cfqq))
                return NULL;

        cfq_mark_cfqq_fifo_expire(cfqq);

        if (list_empty(&cfqq->fifo))
                return NULL;

        fifo = cfq_cfqq_sync(cfqq);
        rq = rq_entry_fifo(cfqq->fifo.next);

        if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
                rq = NULL;

        cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
        return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        const int base_rq = cfqd->cfq_slice_async_rq;

        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

        return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
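
/*
 * Worked example: with the default cfq_slice_async_rq = 2 and
 * CFQ_PRIO_LISTS = IOPRIO_BE_NR = 8, an async queue at prio 0 may
 * dispatch up to 2 * (2 + 2 * 7) = 32 requests per slice, while one at
 * prio 7 is limited to 2 * (2 + 2 * 0) = 4.
 */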

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;

        cfqq = cfqd->active_queue;
        if (!cfqq)
                goto new_queue;

        /*
         * The active queue has run out of time, expire it and select new.
         */
        if (cfq_slice_used(cfqq))
                goto expire;

        /*
         * The active queue has requests and isn't expired, allow it to
         * dispatch.
         */
        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
                goto keep_queue;

        /*
         * No requests pending. If the active queue still has requests in
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
         */
        if (timer_pending(&cfqd->idle_slice_timer) ||
            (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
                cfqq = NULL;
                goto keep_queue;
        }
expire:
        cfq_slice_expired(cfqd, 0);
new_queue:
        cfqq = cfq_set_active_queue(cfqd);
keep_queue:
        return cfqq;
}

/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        int max_dispatch)
{
        int dispatched = 0;

        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

        do {
                struct request *rq;

                /*
                 * follow expired path, else get first next available
                 */
                rq = cfq_check_fifo(cfqq);
                if (rq == NULL)
                        rq = cfqq->next_rq;

                /*
                 * finally, insert request into driver dispatch list
                 */
                cfq_dispatch_insert(cfqd->queue, rq);

                dispatched++;

                if (!cfqd->active_cic) {
                        atomic_inc(&RQ_CIC(rq)->ioc->refcount);
                        cfqd->active_cic = RQ_CIC(rq);
                }

                if (RB_EMPTY_ROOT(&cfqq->sort_list))
                        break;

        } while (dispatched < max_dispatch);

        /*
         * expire an async queue immediately if it has used up its slice.
         * idle queues always expire after one dispatch round.
         */
        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
            dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
            cfq_class_idle(cfqq))) {
                cfqq->slice_end = jiffies + 1;
                cfq_slice_expired(cfqd, 0);
        }

        return dispatched;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
        int dispatched = 0;

        while (cfqq->next_rq) {
                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
                dispatched++;
        }

        BUG_ON(!list_empty(&cfqq->fifo));
        return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;
        int dispatched = 0;

        while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
                dispatched += __cfq_forced_dispatch_cfqq(cfqq);

        cfq_slice_expired(cfqd, 0);

        BUG_ON(cfqd->busy_queues);

        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
        return dispatched;
}

static int cfq_dispatch_requests(struct request_queue *q, int force)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
        int dispatched;

        if (!cfqd->busy_queues)
                return 0;

        if (unlikely(force))
                return cfq_forced_dispatch(cfqd);

        dispatched = 0;
        while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
                int max_dispatch;

                max_dispatch = cfqd->cfq_quantum;
                if (cfq_class_idle(cfqq))
                        max_dispatch = 1;

                if (cfqq->dispatched >= max_dispatch) {
                        if (cfqd->busy_queues > 1)
                                break;
                        if (cfqq->dispatched >= 4 * max_dispatch)
                                break;
                }

                if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
                        break;

                cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_wait_request(cfqq);
                del_timer(&cfqd->idle_slice_timer);

                dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
        }

        cfq_log(cfqd, "dispatched=%d", dispatched);
        return dispatched;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
        struct cfq_data *cfqd = cfqq->cfqd;

        BUG_ON(atomic_read(&cfqq->ref) <= 0);

        if (!atomic_dec_and_test(&cfqq->ref))
                return;

        cfq_log_cfqq(cfqd, cfqq, "put_queue");
        BUG_ON(rb_first(&cfqq->sort_list));
        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
        BUG_ON(cfq_cfqq_on_rr(cfqq));

        if (unlikely(cfqd->active_queue == cfqq)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
                cfq_schedule_dispatch(cfqd);
        }

        kmem_cache_free(cfq_pool, cfqq);
}

/*
 * Must always be called with the rcu_read_lock() held
 */
static void
__call_for_each_cic(struct io_context *ioc,
                    void (*func)(struct io_context *, struct cfq_io_context *))
{
        struct cfq_io_context *cic;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
                func(ioc, cic);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
                  void (*func)(struct io_context *, struct cfq_io_context *))
{
        rcu_read_lock();
        __call_for_each_cic(ioc, func);
        rcu_read_unlock();
}

static void cfq_cic_free_rcu(struct rcu_head *head)
{
        struct cfq_io_context *cic;

        cic = container_of(head, struct cfq_io_context, rcu_head);

        kmem_cache_free(cfq_ioc_pool, cic);
        elv_ioc_count_dec(ioc_count);

        if (ioc_gone) {
                /*
                 * CFQ scheduler is exiting, grab exit lock and check
                 * the pending io context count. If it hits zero,
                 * complete ioc_gone and set it back to NULL
                 */
                spin_lock(&ioc_gone_lock);
                if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
                        complete(ioc_gone);
                        ioc_gone = NULL;
                }
                spin_unlock(&ioc_gone_lock);
        }
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
        call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
        unsigned long flags;

        BUG_ON(!cic->dead_key);

        spin_lock_irqsave(&ioc->lock, flags);
        radix_tree_delete(&ioc->radix_root, cic->dead_key);
        hlist_del_rcu(&cic->cic_list);
        spin_unlock_irqrestore(&ioc->lock, flags);

        cfq_cic_free(cic);
}

/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
 * and ->trim() which is called with the task lock held
 */
static void cfq_free_io_context(struct io_context *ioc)
{
        /*
         * ioc->refcount is zero here, or we are called from elv_unregister(),
         * so no more cic's are allowed to be linked into this ioc. So it
         * should be ok to iterate over the known list, we will see all cic's
         * since no new ones are added.
         */
        __call_for_each_cic(ioc, cic_free_func);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        if (unlikely(cfqq == cfqd->active_queue)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
                cfq_schedule_dispatch(cfqd);
        }

        cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
                                         struct cfq_io_context *cic)
{
        struct io_context *ioc = cic->ioc;

        list_del_init(&cic->queue_list);

        /*
         * Make sure key == NULL is seen for dead queues
         */
        smp_wmb();
        cic->dead_key = (unsigned long) cic->key;
        cic->key = NULL;

        if (ioc->ioc_data == cic)
                rcu_assign_pointer(ioc->ioc_data, NULL);

        if (cic->cfqq[ASYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
                cic->cfqq[ASYNC] = NULL;
        }

        if (cic->cfqq[SYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
                cic->cfqq[SYNC] = NULL;
        }
}

static void cfq_exit_single_io_context(struct io_context *ioc,
                                       struct cfq_io_context *cic)
{
        struct cfq_data *cfqd = cic->key;

        if (cfqd) {
                struct request_queue *q = cfqd->queue;
                unsigned long flags;

                spin_lock_irqsave(q->queue_lock, flags);
                __cfq_exit_single_io_context(cfqd, cic);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belongs to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
        call_for_each_cic(ioc, cfq_exit_single_io_context);
}
22e2c507c
|
1246 |
static struct cfq_io_context * |
8267e268e
|
1247 |
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) |
1da177e4c
|
1248 |
{ |
b5deef901
|
1249 |
struct cfq_io_context *cic; |
1da177e4c
|
1250 |
|
94f6030ca
|
1251 1252 |
cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO, cfqd->queue->node); |
1da177e4c
|
1253 |
if (cic) { |
22e2c507c
|
1254 |
cic->last_end_request = jiffies; |
553698f94
|
1255 |
INIT_LIST_HEAD(&cic->queue_list); |
ffc4e7595
|
1256 |
INIT_HLIST_NODE(&cic->cic_list); |
22e2c507c
|
1257 1258 |
cic->dtor = cfq_free_io_context; cic->exit = cfq_exit_io_context; |
4050cf167
|
1259 |
elv_ioc_count_inc(ioc_count); |
1da177e4c
|
1260 1261 1262 1263 |
} return cic; } |
fd0928df9
|
1264 |
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) |
22e2c507c
|
1265 1266 1267 |
{ struct task_struct *tsk = current; int ioprio_class; |
3b18152c3
|
1268 |
if (!cfq_cfqq_prio_changed(cfqq)) |
22e2c507c
|
1269 |
return; |
fd0928df9
|
1270 |
ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); |
22e2c507c
|
1271 |
switch (ioprio_class) { |
fe094d98e
|
1272 1273 1274 1275 1276 |
default: printk(KERN_ERR "cfq: bad prio %x ", ioprio_class); case IOPRIO_CLASS_NONE: /* |
6d63c2755
|
1277 |
* no prio set, inherit CPU scheduling settings |
fe094d98e
|
1278 1279 |
*/ cfqq->ioprio = task_nice_ioprio(tsk); |
6d63c2755
|
1280 |
cfqq->ioprio_class = task_nice_ioclass(tsk); |
fe094d98e
|
1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 |
break; case IOPRIO_CLASS_RT: cfqq->ioprio = task_ioprio(ioc); cfqq->ioprio_class = IOPRIO_CLASS_RT; break; case IOPRIO_CLASS_BE: cfqq->ioprio = task_ioprio(ioc); cfqq->ioprio_class = IOPRIO_CLASS_BE; break; case IOPRIO_CLASS_IDLE: cfqq->ioprio_class = IOPRIO_CLASS_IDLE; cfqq->ioprio = 7; cfq_clear_cfqq_idle_window(cfqq); break; |
22e2c507c
|
1295 1296 1297 1298 1299 1300 1301 1302 |
} /* * keep track of original prio settings in case we have to temporarily * elevate the priority of this queue */ cfqq->org_ioprio = cfqq->ioprio; cfqq->org_ioprio_class = cfqq->ioprio_class; |
3b18152c3
|
1303 |
cfq_clear_cfqq_prio_changed(cfqq); |
22e2c507c
|
1304 |
} |
febffd618
|
1305 |
static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) |
22e2c507c
|
1306 |
{ |
478a82b0e
|
1307 1308 |
struct cfq_data *cfqd = cic->key; struct cfq_queue *cfqq; |
c1b707d25
|
1309 |
unsigned long flags; |
35e6077cb
|
1310 |
|
caaa5f9f0
|
1311 1312 |
if (unlikely(!cfqd)) return; |
c1b707d25
|
1313 |
spin_lock_irqsave(cfqd->queue->queue_lock, flags); |
caaa5f9f0
|
1314 1315 1316 1317 |
cfqq = cic->cfqq[ASYNC]; if (cfqq) { struct cfq_queue *new_cfqq; |
fd0928df9
|
1318 |
new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC); |
caaa5f9f0
|
1319 1320 1321 1322 |
if (new_cfqq) { cic->cfqq[ASYNC] = new_cfqq; cfq_put_queue(cfqq); } |
22e2c507c
|
1323 |
} |
caaa5f9f0
|
1324 1325 1326 1327 |
cfqq = cic->cfqq[SYNC]; if (cfqq) cfq_mark_cfqq_prio_changed(cfqq); |
c1b707d25
|
1328 |
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); |
22e2c507c
|
1329 |
} |
fc46379da
|
1330 |
static void cfq_ioc_set_ioprio(struct io_context *ioc) |
22e2c507c
|
1331 |
{ |
4ac845a2e
|
1332 |
call_for_each_cic(ioc, changed_ioprio); |
fc46379da
|
1333 |
ioc->ioprio_changed = 0; |
22e2c507c
|
1334 1335 1336 |
} static struct cfq_queue * |
15c31be4d
|
1337 |
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, |
fd0928df9
|
1338 |
struct io_context *ioc, gfp_t gfp_mask) |
22e2c507c
|
1339 |
{ |
22e2c507c
|
1340 |
struct cfq_queue *cfqq, *new_cfqq = NULL; |
91fac317a
|
1341 |
struct cfq_io_context *cic; |
22e2c507c
|
1342 1343 |
retry: |
4ac845a2e
|
1344 |
cic = cfq_cic_lookup(cfqd, ioc); |
91fac317a
|
1345 1346 |
/* cic always exists here */ cfqq = cic_to_cfqq(cic, is_sync); |
22e2c507c
|
1347 1348 1349 1350 1351 1352 |
if (!cfqq) { if (new_cfqq) { cfqq = new_cfqq; new_cfqq = NULL; } else if (gfp_mask & __GFP_WAIT) { |
89850f7ee
|
1353 1354 1355 1356 1357 1358 |
/* * Inform the allocator of the fact that we will * just repeat this allocation if it fails, to allow * the allocator to do whatever it needs to attempt to * free memory. */ |
22e2c507c
|
1359 |
spin_unlock_irq(cfqd->queue->queue_lock); |
94f6030ca
|
1360 1361 1362 |
new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask | __GFP_NOFAIL | __GFP_ZERO, cfqd->queue->node); |
22e2c507c
|
1363 1364 1365 |
spin_lock_irq(cfqd->queue->queue_lock); goto retry; } else { |
94f6030ca
|
1366 1367 1368 |
cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask | __GFP_ZERO, cfqd->queue->node); |
22e2c507c
|
1369 1370 1371 |
if (!cfqq) goto out; } |
d9e7620e6
|
1372 |
RB_CLEAR_NODE(&cfqq->rb_node); |
22e2c507c
|
1373 |
INIT_LIST_HEAD(&cfqq->fifo); |
22e2c507c
|
1374 1375 |
atomic_set(&cfqq->ref, 0); cfqq->cfqd = cfqd; |
c5b680f3b
|
1376 |
|
3b18152c3
|
1377 |
cfq_mark_cfqq_prio_changed(cfqq); |
53b03744e
|
1378 |
cfq_mark_cfqq_queue_new(cfqq); |
91fac317a
|
1379 |
|
fd0928df9
|
1380 |
cfq_init_prio_data(cfqq, ioc); |
0871714e0
|
1381 1382 1383 1384 1385 1386 |
if (is_sync) { if (!cfq_class_idle(cfqq)) cfq_mark_cfqq_idle_window(cfqq); cfq_mark_cfqq_sync(cfqq); } |
7b679138b
|
1387 1388 |
cfqq->pid = current->pid; cfq_log_cfqq(cfqd, cfqq, "alloced"); |
22e2c507c
|
1389 1390 1391 1392 |
} if (new_cfqq) kmem_cache_free(cfq_pool, new_cfqq); |
22e2c507c
|
1393 1394 1395 1396 |
out: WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); return cfqq; } |
c2dea2d1f
|
1397 1398 1399 |
static struct cfq_queue ** cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) { |
fe094d98e
|
1400 |
switch (ioprio_class) { |
c2dea2d1f
|
1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 |
case IOPRIO_CLASS_RT: return &cfqd->async_cfqq[0][ioprio]; case IOPRIO_CLASS_BE: return &cfqd->async_cfqq[1][ioprio]; case IOPRIO_CLASS_IDLE: return &cfqd->async_idle_cfqq; default: BUG(); } } |
15c31be4d
|
1411 |
static struct cfq_queue * |
fd0928df9
|
1412 |
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc, |
15c31be4d
|
1413 1414 |
gfp_t gfp_mask) { |
fd0928df9
|
1415 1416 |
const int ioprio = task_ioprio(ioc); const int ioprio_class = task_ioprio_class(ioc); |
c2dea2d1f
|
1417 |
struct cfq_queue **async_cfqq = NULL; |
15c31be4d
|
1418 |
struct cfq_queue *cfqq = NULL; |
c2dea2d1f
|
1419 1420 1421 1422 |
if (!is_sync) { async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); cfqq = *async_cfqq; } |
0a0836a09
|
1423 |
if (!cfqq) { |
fd0928df9
|
1424 |
cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); |
0a0836a09
|
1425 1426 1427 |
if (!cfqq) return NULL; } |
15c31be4d
|
1428 1429 1430 1431 |
/* * pin the queue now that it's allocated, scheduler exit will prune it */ |
c2dea2d1f
|
1432 |
if (!is_sync && !(*async_cfqq)) { |
15c31be4d
|
1433 |
atomic_inc(&cfqq->ref); |
c2dea2d1f
|
1434 |
*async_cfqq = cfqq; |
15c31be4d
|
1435 1436 1437 1438 1439 |
} atomic_inc(&cfqq->ref); return cfqq; } |
498d3aa2b
|
1440 1441 1442 |
/* * We drop cfq io contexts lazily, so we may find a dead one. */ |
dbecf3ab4
|
1443 |
static void |
4ac845a2e
|
1444 1445 |
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, struct cfq_io_context *cic) |
dbecf3ab4
|
1446 |
{ |
4ac845a2e
|
1447 |
unsigned long flags; |
fc46379da
|
1448 |
WARN_ON(!list_empty(&cic->queue_list)); |
597bc485d
|
1449 |
|
4ac845a2e
|
1450 |
spin_lock_irqsave(&ioc->lock, flags); |
4faa3c815
|
1451 |
BUG_ON(ioc->ioc_data == cic); |
597bc485d
|
1452 |
|
4ac845a2e
|
1453 |
radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd); |
ffc4e7595
|
1454 |
hlist_del_rcu(&cic->cic_list); |
4ac845a2e
|
1455 1456 1457 |
spin_unlock_irqrestore(&ioc->lock, flags); cfq_cic_free(cic); |
dbecf3ab4
|
1458 |
} |
e2d74ac06
|
1459 |
static struct cfq_io_context * |
4ac845a2e
|
1460 |
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) |
e2d74ac06
|
1461 |
{ |
e2d74ac06
|
1462 |
struct cfq_io_context *cic; |
d6de8be71
|
1463 |
unsigned long flags; |
4ac845a2e
|
1464 |
void *k; |
e2d74ac06
|
1465 |
|
91fac317a
|
1466 1467 |
if (unlikely(!ioc)) return NULL; |
d6de8be71
|
1468 |
rcu_read_lock(); |
597bc485d
|
1469 1470 1471 |
/* * we maintain a last-hit cache, to avoid browsing over the tree */ |
4ac845a2e
|
1472 |
cic = rcu_dereference(ioc->ioc_data); |
d6de8be71
|
1473 1474 |
if (cic && cic->key == cfqd) { rcu_read_unlock(); |
597bc485d
|
1475 |
return cic; |
d6de8be71
|
1476 |
} |
597bc485d
|
1477 |
|
4ac845a2e
|
1478 |
do { |
4ac845a2e
|
1479 1480 1481 1482 |
cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd); rcu_read_unlock(); if (!cic) break; |
be3b07535
|
1483 1484 1485 |
/* ->key must be copied to avoid race with cfq_exit_queue() */ k = cic->key; if (unlikely(!k)) { |
4ac845a2e
|
1486 |
cfq_drop_dead_cic(cfqd, ioc, cic); |
d6de8be71
|
1487 |
rcu_read_lock(); |
4ac845a2e
|
1488 |
continue; |
dbecf3ab4
|
1489 |
} |
e2d74ac06
|
1490 |
|
d6de8be71
|
1491 |
spin_lock_irqsave(&ioc->lock, flags); |
4ac845a2e
|
1492 |
rcu_assign_pointer(ioc->ioc_data, cic); |
d6de8be71
|
1493 |
spin_unlock_irqrestore(&ioc->lock, flags); |
4ac845a2e
|
1494 1495 |
break; } while (1); |
e2d74ac06
|
1496 |
|
4ac845a2e
|
1497 |
return cic; |
e2d74ac06
|
1498 |
} |
/*
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                        struct cfq_io_context *cic, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

        ret = radix_tree_preload(gfp_mask);
        if (!ret) {
                cic->ioc = ioc;
                cic->key = cfqd;

                spin_lock_irqsave(&ioc->lock, flags);
                ret = radix_tree_insert(&ioc->radix_root,
                                        (unsigned long) cfqd, cic);
                if (!ret)
                        hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
                spin_unlock_irqrestore(&ioc->lock, flags);

                radix_tree_preload_end();

                if (!ret) {
                        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
                        list_add(&cic->queue_list, &cfqd->cic_list);
                        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
                }
        }

        if (ret)
                printk(KERN_ERR "cfq: cic link failed!\n");

        return ret;
}
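/*
 * Note the radix_tree_preload() dance above: the node allocation may
 * sleep, so it happens before ioc->lock is taken, and the actual
 * radix_tree_insert() then runs under the lock without allocating.
 * radix_tree_preload() leaves preemption disabled on success, hence
 * the matching radix_tree_preload_end().
 */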
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        ioc = get_io_context(gfp_mask, cfqd->queue->node);
        if (!ioc)
                return NULL;

        cic = cfq_cic_lookup(cfqd, ioc);
        if (cic)
                goto out;

        cic = cfq_alloc_io_context(cfqd, gfp_mask);
        if (cic == NULL)
                goto err;

        if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
                goto err_free;

out:
        smp_read_barrier_depends();
        if (unlikely(ioc->ioprio_changed))
                cfq_ioc_set_ioprio(ioc);

        return cic;
err_free:
        cfq_cic_free(cic);
err:
        put_io_context(ioc);
        return NULL;
}
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
        unsigned long elapsed = jiffies - cic->last_end_request;
        unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

        cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
        cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
        cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
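/*
 * Worked example of the fixed-point decay above (illustration only):
 * ttime_samples and ttime_total both carry a 256x scale and decay by
 * 7/8 per sample, so the scale cancels out in ttime_mean.  Feeding a
 * constant ttime of 4 jiffies starting from zero:
 *
 *      samples: 0 -> 32 -> 60 -> 84 -> ...   (limit 256)
 *      total:   0 -> 128 -> 240 -> 338 -> ...   (limit 1024)
 *      mean:    (total + 128) / samples -> 4
 *
 * so ttime_mean remains a plain jiffies value tracking a weighted
 * average of recent think times.
 */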
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
                       struct request *rq)
{
        sector_t sdist;
        u64 total;

        if (cic->last_request_pos < rq->sector)
                sdist = rq->sector - cic->last_request_pos;
        else
                sdist = cic->last_request_pos - rq->sector;

        /*
         * Don't allow the seek distance to get too large from the
         * odd fragment, pagein, etc
         */
        if (cic->seek_samples <= 60) /* second&third seek */
                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
        else
                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

        cic->seek_samples = (7*cic->seek_samples + 256) / 8;
        cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
        total = cic->seek_total + (cic->seek_samples/2);
        do_div(total, cic->seek_samples);
        cic->seek_mean = (sector_t)total;
}
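/*
 * Same 7/8 decay and 256x fixed-point scale as the think time
 * statistics above; do_div() is used because seek_total is u64 and a
 * plain 64-bit division is not available on all 32-bit targets.  The
 * clamp keeps a single outlier seek (an odd fragment or pagein at the
 * far end of the disk) from swamping the mean: while only a couple of
 * samples have accumulated the allowed distance is 4*seek_mean plus a
 * large slack, after that the slack shrinks considerably.
 */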
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                       struct cfq_io_context *cic)
{
        int old_idle, enable_idle;

        /*
         * Don't idle for async or idle io prio class
         */
        if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
                return;

        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
            (cfqd->hw_tag && CIC_SEEKY(cic)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
                if (cic->ttime_mean > cfqd->cfq_slice_idle)
                        enable_idle = 0;
                else
                        enable_idle = 1;
        }

        if (old_idle != enable_idle) {
                cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
                if (enable_idle)
                        cfq_mark_cfqq_idle_window(cfqq);
                else
                        cfq_clear_cfqq_idle_window(cfqq);
        }
}
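/*
 * In short: idling is only ever considered for sync queues, and is
 * switched off when the io context no longer has tasks attached, when
 * slice_idle is tuned to 0, or when the device queues internally
 * (hw_tag) and the process looks seeky -- waiting for a nearby
 * request is then unlikely to pay off.  Otherwise the measured mean
 * think time against cfq_slice_idle decides.
 */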
/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                   struct request *rq)
{
        struct cfq_queue *cfqq;

        cfqq = cfqd->active_queue;
        if (!cfqq)
                return 0;

        if (cfq_slice_used(cfqq))
                return 1;

        if (cfq_class_idle(new_cfqq))
                return 0;

        if (cfq_class_idle(cfqq))
                return 1;

        /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
         */
        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
                return 1;

        /*
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
        if (rq_is_meta(rq) && !cfqq->meta_pending)
                return 1;

        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
                return 0;

        /*
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
        if (cfq_rq_close(cfqd, rq))
                return 1;

        return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfq_log_cfqq(cfqd, cfqq, "preempt");
        cfq_slice_expired(cfqd, 1);

        /*
         * Put the new queue at the front of the current list,
         * so we know that it will be selected next.
         */
        BUG_ON(!cfq_cfqq_on_rr(cfqq));

        cfq_service_tree_add(cfqd, cfqq, 1);

        cfqq->slice_end = 0;
        cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                struct request *rq)
{
        struct cfq_io_context *cic = RQ_CIC(rq);

        if (rq_is_meta(rq))
                cfqq->meta_pending++;

        cfq_update_io_thinktime(cfqd, cic);
        cfq_update_io_seektime(cfqd, cic, rq);
        cfq_update_idle_window(cfqd, cfqq, cic);

        cic->last_request_pos = rq->sector + rq->nr_sectors;

        if (cfqq == cfqd->active_queue) {
                /*
                 * if we are waiting for a request for this queue, let it rip
                 * immediately and flag that we must not expire this queue
                 * just now
                 */
                if (cfq_cfqq_wait_request(cfqq)) {
                        cfq_mark_cfqq_must_dispatch(cfqq);
                        del_timer(&cfqd->idle_slice_timer);
                        blk_start_queueing(cfqd->queue);
                }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
                 * idle and has expired its mean thinktime or this new queue
                 * has some old slice time left and is of higher priority
                 */
                cfq_preempt_queue(cfqd, cfqq);
                cfq_mark_cfqq_must_dispatch(cfqq);
                blk_start_queueing(cfqd->queue);
        }
}
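/*
 * Summary of the enqueue path: each incoming request first refreshes
 * the per-process statistics (think time, seek pattern, idle window).
 * Then either the active queue is kicked, if it was idling for exactly
 * this request, or cfq_should_preempt() may expire the active slice
 * early in favour of the new queue (used-up slice, sync beating async,
 * metadata, or a request close to the current head position).
 */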
static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        cfq_log_cfqq(cfqd, cfqq, "insert_request");
        cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);

        cfq_add_rq_rb(rq);

        list_add_tail(&rq->queuelist, &cfqq->fifo);

        cfq_rq_enqueued(cfqd, cfqq, rq);
}
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        const int sync = rq_is_sync(rq);
        unsigned long now;

        now = jiffies;
        cfq_log_cfqq(cfqd, cfqq, "complete");

        WARN_ON(!cfqd->rq_in_driver);
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;

        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight--;

        if (!cfq_class_idle(cfqq))
                cfqd->last_end_request = now;

        if (sync)
                RQ_CIC(rq)->last_end_request = now;

        /*
         * If this is the active queue, check if it needs to be expired,
         * or if we want to idle in case it has no pending requests.
         */
        if (cfqd->active_queue == cfqq) {
                if (cfq_cfqq_slice_new(cfqq)) {
                        cfq_set_prio_slice(cfqd, cfqq);
                        cfq_clear_cfqq_slice_new(cfqq);
                }
                if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
                        cfq_slice_expired(cfqd, 1);
                else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
                        cfq_arm_slice_timer(cfqd);
        }

        if (!cfqd->rq_in_driver)
                cfq_schedule_dispatch(cfqd);
}
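/*
 * Completion is also where slice timing starts: the first completed
 * request of a fresh slice sets the slice length via
 * cfq_set_prio_slice(), so a queue is not charged for its initial
 * dispatch latency.  Arming the idle timer only for a sync queue with
 * an empty sort_list is what implements the short wait for a nearby
 * follow-up request.
 */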
/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
        if (has_fs_excl()) {
                /*
                 * boost idle prio on transactions that would lock out other
                 * users of the filesystem
                 */
                if (cfq_class_idle(cfqq))
                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
                if (cfqq->ioprio > IOPRIO_NORM)
                        cfqq->ioprio = IOPRIO_NORM;
        } else {
                /*
                 * check if we need to unboost the queue
                 */
                if (cfqq->ioprio_class != cfqq->org_ioprio_class)
                        cfqq->ioprio_class = cfqq->org_ioprio_class;
                if (cfqq->ioprio != cfqq->org_ioprio)
                        cfqq->ioprio = cfqq->org_ioprio;
        }
}
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
        if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
            !cfq_cfqq_must_alloc_slice(cfqq)) {
                cfq_mark_cfqq_must_alloc_slice(cfqq);
                return ELV_MQUEUE_MUST;
        }

        return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;

        /*
         * don't force setup of a queue from here, as a call to may_queue
         * does not necessarily imply that a request actually will be queued.
         * so just lookup a possibly existing queue, or return 'may queue'
         * if that fails
         */
        cic = cfq_cic_lookup(cfqd, tsk->io_context);
        if (!cic)
                return ELV_MQUEUE_MAY;

        cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
        if (cfqq) {
                cfq_init_prio_data(cfqq, cic->ioc);
                cfq_prio_boost(cfqq);

                return __cfq_may_queue(cfqq);
        }

        return ELV_MQUEUE_MAY;
}
/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        if (cfqq) {
                const int rw = rq_data_dir(rq);

                BUG_ON(!cfqq->allocated[rw]);
                cfqq->allocated[rw]--;

                put_io_context(RQ_CIC(rq)->ioc);

                rq->elevator_private = NULL;
                rq->elevator_private2 = NULL;

                cfq_put_queue(cfqq);
        }
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
        const int is_sync = rq_is_sync(rq);
        struct cfq_queue *cfqq;
        unsigned long flags;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        cic = cfq_get_io_context(cfqd, gfp_mask);

        spin_lock_irqsave(q->queue_lock, flags);

        if (!cic)
                goto queue_fail;

        cfqq = cic_to_cfqq(cic, is_sync);
        if (!cfqq) {
                cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);

                if (!cfqq)
                        goto queue_fail;

                cic_set_cfqq(cic, cfqq, is_sync);
        }

        cfqq->allocated[rw]++;
        cfq_clear_cfqq_must_alloc(cfqq);
        atomic_inc(&cfqq->ref);

        spin_unlock_irqrestore(q->queue_lock, flags);

        rq->elevator_private = cic;
        rq->elevator_private2 = cfqq;
        return 0;

queue_fail:
        if (cic)
                put_io_context(cic->ioc);

        cfq_schedule_dispatch(cfqd);
        spin_unlock_irqrestore(q->queue_lock, flags);
        cfq_log(cfqd, "set_request fail");
        return 1;
}
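/*
 * Lifecycle note: cfq_set_request() attaches one cic and one cfqq
 * reference to the request through elevator_private and
 * elevator_private2; both are dropped again in cfq_put_request()
 * when the block layer frees the request.
 */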
static void cfq_kick_queue(struct work_struct *work)
{
        struct cfq_data *cfqd =
                container_of(work, struct cfq_data, unplug_work);
        struct request_queue *q = cfqd->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
        struct cfq_data *cfqd = (struct cfq_data *) data;
        struct cfq_queue *cfqq;
        unsigned long flags;
        int timed_out = 1;

        cfq_log(cfqd, "idle timer fired");

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        cfqq = cfqd->active_queue;
        if (cfqq) {
                timed_out = 0;

                /*
                 * expired
                 */
                if (cfq_slice_used(cfqq))
                        goto expire;

                /*
                 * only expire and reinvoke request handler, if there are
                 * other queues with pending requests
                 */
                if (!cfqd->busy_queues)
                        goto out_cont;

                /*
                 * not expired and it has a request pending, let it dispatch
                 */
                if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
                        cfq_mark_cfqq_must_dispatch(cfqq);
                        goto out_kick;
                }
        }
expire:
        cfq_slice_expired(cfqd, timed_out);
out_kick:
        cfq_schedule_dispatch(cfqd);
out_cont:
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
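/*
 * The labels above encode the three timer outcomes: "expire" ends the
 * current slice, passing timed_out so cfq_slice_expired() can tell a
 * genuine idle timeout from a slice that was simply used up;
 * "out_kick" also reinvokes the request handler so another queue can
 * be selected; and "out_cont" backs off because no queue has pending
 * work.
 */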
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
        del_timer_sync(&cfqd->idle_slice_timer);
        kblockd_flush_work(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
        int i;

        for (i = 0; i < IOPRIO_BE_NR; i++) {
                if (cfqd->async_cfqq[0][i])
                        cfq_put_queue(cfqd->async_cfqq[0][i]);
                if (cfqd->async_cfqq[1][i])
                        cfq_put_queue(cfqd->async_cfqq[1][i]);
        }

        if (cfqd->async_idle_cfqq)
                cfq_put_queue(cfqd->async_idle_cfqq);
}
static void cfq_exit_queue(elevator_t *e)
{
        struct cfq_data *cfqd = e->elevator_data;
        struct request_queue *q = cfqd->queue;

        cfq_shutdown_timer_wq(cfqd);

        spin_lock_irq(q->queue_lock);

        if (cfqd->active_queue)
                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);

        while (!list_empty(&cfqd->cic_list)) {
                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
                                                        struct cfq_io_context,
                                                        queue_list);

                __cfq_exit_single_io_context(cfqd, cic);
        }

        cfq_put_async_queues(cfqd);

        spin_unlock_irq(q->queue_lock);

        cfq_shutdown_timer_wq(cfqd);

        kfree(cfqd);
}
static void *cfq_init_queue(struct request_queue *q)
{
        struct cfq_data *cfqd;

        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!cfqd)
                return NULL;

        cfqd->service_tree = CFQ_RB_ROOT;
        INIT_LIST_HEAD(&cfqd->cic_list);

        cfqd->queue = q;

        init_timer(&cfqd->idle_slice_timer);
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
        cfqd->idle_slice_timer.data = (unsigned long) cfqd;

        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

        cfqd->last_end_request = jiffies;
        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
        cfqd->cfq_back_max = cfq_back_max;
        cfqd->cfq_back_penalty = cfq_back_penalty;
        cfqd->cfq_slice[0] = cfq_slice_async;
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;

        return cfqd;
}

static void cfq_slab_kill(void)
{
        /*
         * Caller already ensured that pending RCU callbacks are completed,
         * so we should have no busy allocations at this point.
         */
        if (cfq_pool)
                kmem_cache_destroy(cfq_pool);
        if (cfq_ioc_pool)
                kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto fail;

        cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
        if (!cfq_ioc_pool)
                goto fail;

        return 0;
fail:
        cfq_slab_kill();
        return -ENOMEM;
}
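/*
 * KMEM_CACHE() creates a slab cache named after the given struct and
 * sized/aligned for it, so cfq_pool backs cfq_queue allocations and
 * cfq_ioc_pool backs cfq_io_context allocations.
 */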
/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}
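/*
 * These helpers back the tunables exported below.  With cfq active
 * they show up under /sys/block/<dev>/queue/iosched/, e.g. (device
 * name is just an example):
 *
 *      # cat /sys/block/sda/queue/iosched/slice_idle
 *      # echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */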
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct cfq_data *cfqd = e->elevator_data;                       \
        unsigned int __data = __VAR;                                    \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return cfq_var_show(__data, (page));                            \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)   \
{                                                                       \
        struct cfq_data *cfqd = e->elevator_data;                       \
        unsigned int __data;                                            \
        int ret = cfq_var_store(&__data, (page), count);                \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
                UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
                UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
                UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                UINT_MAX, 0);
#undef STORE_FUNCTION
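/*
 * For reference, SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0)
 * above expands (roughly, with the dead __CONV branch folded away) to:
 *
 *      static ssize_t cfq_quantum_show(elevator_t *e, char *page)
 *      {
 *              struct cfq_data *cfqd = e->elevator_data;
 *              unsigned int __data = cfqd->cfq_quantum;
 *              return cfq_var_show(__data, (page));
 *      }
 *
 * the __CONV argument selects conversion between jiffies (internal)
 * and milliseconds (sysfs) for the time-based tunables.
 */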
#define CFQ_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(quantum),
        CFQ_ATTR(fifo_expire_sync),
        CFQ_ATTR(fifo_expire_async),
        CFQ_ATTR(back_seek_max),
        CFQ_ATTR(back_seek_penalty),
        CFQ_ATTR(slice_sync),
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
        __ATTR_NULL
};
static struct elevator_type iosched_cfq = {
        .ops = {
                .elevator_merge_fn =            cfq_merge,
                .elevator_merged_fn =           cfq_merged_request,
                .elevator_merge_req_fn =        cfq_merged_requests,
                .elevator_allow_merge_fn =      cfq_allow_merge,
                .elevator_dispatch_fn =         cfq_dispatch_requests,
                .elevator_add_req_fn =          cfq_insert_request,
                .elevator_activate_req_fn =     cfq_activate_request,
                .elevator_deactivate_req_fn =   cfq_deactivate_request,
                .elevator_queue_empty_fn =      cfq_queue_empty,
                .elevator_completed_req_fn =    cfq_completed_request,
                .elevator_former_req_fn =       elv_rb_former_request,
                .elevator_latter_req_fn =       elv_rb_latter_request,
                .elevator_set_req_fn =          cfq_set_request,
                .elevator_put_req_fn =          cfq_put_request,
                .elevator_may_queue_fn =        cfq_may_queue,
                .elevator_init_fn =             cfq_init_queue,
                .elevator_exit_fn =             cfq_exit_queue,
                .trim =                         cfq_free_io_context,
        },
        .elevator_attrs =       cfq_attrs,
        .elevator_name =        "cfq",
        .elevator_owner =       THIS_MODULE,
};

static int __init cfq_init(void)
{
        /*
         * could be 0 on HZ < 1000 setups
         */
        if (!cfq_slice_async)
                cfq_slice_async = 1;
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;

        if (cfq_slab_setup())
                return -ENOMEM;

        elv_register(&iosched_cfq);

        return 0;
}

static void __exit cfq_exit(void)
{
        DECLARE_COMPLETION_ONSTACK(all_gone);
        elv_unregister(&iosched_cfq);
        ioc_gone = &all_gone;
        /* ioc_gone's update must be visible before reading ioc_count */
        smp_wmb();

        /*
         * this also protects us from entering cfq_slab_kill() with
         * pending RCU callbacks
         */
        if (elv_ioc_count_read(ioc_count))
                wait_for_completion(&all_gone);
        cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");