block/cfq-iosched.c
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
static u64 cfq_slice_async = NSEC_PER_SEC / 25;
static const int cfq_slice_async_rq = 2;
static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
static u64 cfq_group_idle = NSEC_PER_SEC / 125;
static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2 * NSEC_PER_SEC / HZ)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
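
/*
 * Editor's note (not in the original source): seek_history is a 32-bit
 * shift register over a queue's last 32 requests.  Where seek distances
 * are recorded later in this file, one bit is shifted in per request -
 * 1 if the distance from the previous request exceeded the seek
 * threshold, 0 otherwise, roughly:
 *
 *	cfqq->seek_history <<= 1;
 *	cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
 *
 * CFQQ_SEEKY() then flags a queue as seeky when more than 1/8 (4 of 32)
 * of its recent requests were seeks.
 */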

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/* blkio-related constants */
#define CFQ_WEIGHT_LEGACY_MIN	10
#define CFQ_WEIGHT_LEGACY_DFL	500
#define CFQ_WEIGHT_LEGACY_MAX	1000

struct cfq_ttime {
	u64 last_end_request;

	u64 ttime_total;
	u64 ttime_mean;
	unsigned long ttime_samples;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = ktime_get_ns(),},}
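
/*
 * Editor's note: thanks to the cached ->left pointer, finding the minimum
 * element of a service tree is O(1) when the cache is warm instead of an
 * O(log n) walk; cfq_rb_first() below consults ->left first and only falls
 * back to rb_first() when the cache was invalidated by an erase.
 */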

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	u64 rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	u64 dispatch_start;
	u64 allocated_slice;
	u64 slice_dispatch;
	/* time when first request from queue completed and slice started. */
	u64 slice_start;
	u64 slice_end;
	s64 slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately - it goes on its own tree, service_tree_idle,
 * and is never used as an index into service_trees[].
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* Per-cgroup data */
struct cfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data cpd;

	unsigned int weight;
	unsigned int leaf_weight;
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg.  This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active.  An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to.  This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1.  The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
	 * this cfqg against the child cfqgs.  For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	u64 saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */

	/* async queue for each priority case */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;
};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_serial_nr; /* the current blkcg serial */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	u64 workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct hrtimer idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_latency;
	u64 cfq_fifo_expire[2];
	u64 cfq_slice[2];
	u64 cfq_slice_idle;
	u64 cfq_group_idle;
	u64 cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	u64 last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static void cfq_put_queue(struct cfq_queue *cfqq);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}
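
/*
 * Editor's note: st_for() is the single place that maps a (class, type)
 * pair to a tree.  For example, st_for(cfqg, RT_WORKLOAD, SYNC_WORKLOAD)
 * yields &cfqg->service_trees[1][2], while any IDLE class queue lands on
 * the shared service_tree_idle regardless of workload type.
 */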

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
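
/*
 * Editor's note: each CFQ_CFQQ_FNS(name) invocation above expands to three
 * accessors for one flag bit.  CFQ_CFQQ_FNS(on_rr), for instance, generates:
 *
 *	cfq_mark_cfqq_on_rr(cfqq);	sets CFQ_CFQQ_FLAG_on_rr
 *	cfq_clear_cfqq_on_rr(cfqq);	clears it
 *	cfq_cfqq_on_rr(cfqq);		tests it, returning 0 or 1
 */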

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static struct cfq_group_data
*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
{
	return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
				    cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int op,
					    int op_flags)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			uint64_t time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
					       int op_flags)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
					       int op_flags)
{
	blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int op,
			int op_flags)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op, op_flags,
				now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op, op_flags,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
	cfqg_stats_reset(&cfqg->stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return true;
}
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
				##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int op, int op_flags) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			uint64_t time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
			int op_flags) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
			int op_flags) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int op,
			int op_flags) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \
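
/*
 * Editor's note: the iterator above visits all seven trees of a group -
 * the 2 x 3 service_trees[][] (BE/RT classes times ASYNC/SYNC_NOIDLE/SYNC
 * types) plus the single service_tree_idle.  Typical use, as in
 * cfq_init_cfqg_base() below:
 *
 *	struct cfq_rb_root *st;
 *	int i, j;
 *
 *	for_each_cfqg_st(cfqg, i, j, st)
 *		*st = CFQ_RB_ROOT;
 */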

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	u64 slice;

	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(&cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	u64 base_slice = cfqd->cfq_slice[sync];
	u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (slice * (4 - prio));
}
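
/*
 * Editor's note, a worked example with the defaults above: the sync base
 * slice is 100ms and CFQ_SLICE_SCALE is 5, so each priority step is worth
 * 100/5 = 20ms.  A sync queue at the default prio 4 gets 100ms, the highest
 * prio 0 gets 100 + 20*4 = 180ms, and the lowest prio 7 gets
 * 100 - 20*3 = 40ms (the (4 - prio) factor goes negative there, but the
 * unsigned wraparound still yields the intended value).
 */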

static inline u64
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(u64 charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	return div_u64(c, vfraction);
}

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);

	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);

	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}
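
/*
 * Editor's notes on the vdisktime arithmetic above:
 *
 * - Worked cfqg_scale_charge() example: with CFQ_SERVICE_SHIFT = 12, a
 *   group entitled to half the disk has vfraction = 1/2 in fixed point,
 *   i.e. 2048.  Charging 10ms of disk time gives
 *   scaled = (10ms << 24) / 2048 = 20ms in fixed point, so that group's
 *   vdisktime advances at twice the wall-clock rate - the smaller the
 *   share, the faster its virtual clock runs.
 *
 * - max_vdisktime()/min_vdisktime() compare through a signed (s64) delta
 *   rather than directly, so the comparison stays correct even if the u64
 *   virtual clocks wrap, as long as the two values are within 2^63 of
 *   each other.
 */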

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * so that it follows sudden increases quickly and decreases slowly
 */
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
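
/*
 * Editor's note, a worked example with cfq_hist_divisor = 4 (so mult = 3,
 * round = 2): if the stored average is 0 and 4 queues suddenly become busy,
 * the new average is (3*4 + 0 + 2)/4 = 3 - most of the jump is absorbed in
 * one step.  If the average is 4 and the busy count drops to 0, the new
 * average is also (3*4 + 0 + 2)/4 = 3 - barely down from 4.  Weighting the
 * max side is what makes growth fast and decay slow.
 */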

static inline u64
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline u64
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	u64 slice = cfq_prio_to_slice(cfqd, cfqq);

	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		u64 sync_slice = cfqd->cfq_slice[1];
		u64 expect_latency = sync_slice * iq;
		u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
			u64 low_slice;

			/* scale low_slice according to IO priority
			 * and sync vs async */
			low_slice = div64_u64(base_low_slice*slice, sync_slice);
			low_slice = min(slice, low_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = div64_u64(slice*group_slice, expect_latency);
			slice = max(slice, low_slice);
		}
	}
	return slice;
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
	u64 now = ktime_get_ns();

	cfqq->slice_start = now;
	cfqq->slice_end = now + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (ktime_get_ns() < cfqq->slice_end)
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
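
/*
 * Editor's note, a worked example of the backward-seek penalty: with the
 * default cfq_back_max = 16MB (32768 sectors) and cfq_back_penalty = 2,
 * suppose the head is at sector 1000, rq1 is at 1100 and rq2 is at 960.
 * Then d1 = 100, while rq2 - only 40 sectors behind, well within
 * back_max - gets d2 = 40 * 2 = 80, so the short backward seek to rq2
 * still wins.  Anything further behind than back_max "wraps" and is only
 * chosen when every alternative wraps too.
 */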

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static u64 cfq_slice_offset(struct cfq_data *cfqd,
			    struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

/*
 * This has to be called only on activation of cfqg
 */
static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	if (cfqg->new_weight) {
		cfqg->weight = cfqg->new_weight;
		cfqg->new_weight = 0;
	}
}

static void
cfq_update_group_leaf_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	if (cfqg->new_leaf_weight) {
		cfqg->leaf_weight = cfqg->new_leaf_weight;
		cfqg->new_leaf_weight = 0;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
	struct cfq_group *pos = cfqg;
	struct cfq_group *parent;
	bool propagate;

	/* add to the service tree */
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	/*
	 * Update leaf_weight.  We cannot update weight at this point
	 * because cfqg might already have been activated and is
	 * contributing its current weight to the parent's child_weight.
	 */
	cfq_update_group_leaf_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);

	/*
	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
	 * entitled to.  vfraction is calculated by walking the tree
	 * towards the root calculating the fraction it has at each level.
	 * The compounded ratio is how much vfraction @cfqg owns.
	 *
	 * Start with the proportion tasks in this cfqg has against active
	 * children cfqgs - its leaf_weight against children_weight.
	 */
	propagate = !pos->nr_active++;
	pos->children_weight += pos->leaf_weight;
	vfr = vfr * pos->leaf_weight / pos->children_weight;

	/*
	 * Compound ->weight walking up the tree.  Both activation and
	 * vfraction calculation are done in the same loop.  Propagation
	 * stops once an already activated node is met.  vfraction
	 * calculation should always continue to the root.
	 */
	while ((parent = cfqg_parent(pos))) {
		if (propagate) {
			cfq_update_group_weight(pos);
			propagate = !parent->nr_active++;
			parent->children_weight += pos->weight;
		}
		vfr = vfr * pos->weight / parent->children_weight;
		pos = parent;
	}

	cfqg->vfraction = max_t(unsigned, vfr, 1);
}
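
/*
 * Editor's note, a worked vfraction example for the function above: take a
 * two-level hierarchy where the activated cfqg has leaf_weight 500 against
 * a children_weight of 1000 at its own level, and its parent has weight 500
 * against the root's children_weight of 1000.  Starting from
 * vfr = 1 << 12 = 4096, the compounded ratio is
 * 4096 * (500/1000) * (500/1000) = 1024, i.e. 1/4 in fixed point - this
 * group is entitled to a quarter of the disk time.
 */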

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose its entire share if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct cfq_group *pos = cfqg;
	bool propagate;

	/*
	 * Undo activation from cfq_group_service_tree_add().  Deactivate
	 * @cfqg and propagate deactivation upwards.
	 */
	propagate = !--pos->nr_active;
	pos->children_weight -= pos->leaf_weight;

	while (propagate) {
		struct cfq_group *parent = cfqg_parent(pos);

		/* @pos has 0 nr_active at this point */
		WARN_ON_ONCE(pos->children_weight);
		pos->vfraction = 0;

		if (!parent)
			break;

		propagate = !--parent->nr_active;
		parent->children_weight -= pos->weight;
		pos = parent;
	}

	/* remove from the service tree */
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_wl_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}

static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
				       u64 *unaccounted_time)
{
	u64 slice_used;
	u64 now = ktime_get_ns();

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == now) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(u64, (now - cfqq->dispatch_start),
					jiffies_to_nsecs(1));
	} else {
		slice_used = now - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (cfqq->slice_start > cfqq->dispatch_start)
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}
dae739ebc
|
1398 1399 1400 1401 |
return slice_used; } static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, |
e5ff082e8
|
1402 |
struct cfq_queue *cfqq) |
dae739ebc
|
1403 1404 |
{ struct cfq_rb_root *st = &cfqd->grp_service_tree; |
9a7f38c42
|
1405 |
u64 used_sl, charge, unaccounted_sl = 0; |
f26bd1f0a
|
1406 1407 |
int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) - cfqg->service_tree_idle.count; |
1d3650f71
|
1408 |
unsigned int vfr; |
9a7f38c42
|
1409 |
u64 now = ktime_get_ns(); |
f26bd1f0a
|
1410 1411 |
BUG_ON(nr_sync < 0); |
167400d34
|
1412 |
used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); |
dae739ebc
|
1413 |
|
02b35081f
|
1414 1415 1416 1417 |
if (iops_mode(cfqd)) charge = cfqq->slice_dispatch; else if (!cfq_cfqq_sync(cfqq) && !nr_sync) charge = cfqq->allocated_slice; |
dae739ebc
|
1418 |
|
1d3650f71
|
1419 1420 1421 1422 1423 1424 1425 |
/* * Can't update vdisktime while on service tree and cfqg->vfraction * is valid only while on it. Cache vfr, leave the service tree, * update vdisktime and go back on. The re-addition to the tree * will also update the weights as necessary. */ vfr = cfqg->vfraction; |
8184f93ec
|
1426 |
cfq_group_service_tree_del(st, cfqg); |
1d3650f71
|
1427 |
cfqg->vdisktime += cfqg_scale_charge(charge, vfr); |
8184f93ec
|
1428 |
cfq_group_service_tree_add(st, cfqg); |
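/*
 * Scaling note: the charge is divided by the cached weight fraction
 * (vfr), so a group holding e.g. a quarter of the total weight sees
 * its vdisktime advance roughly four times as fast per unit of
 * service; that is what keeps disk time proportional to weight.
 */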
dae739ebc
|
1429 1430 |
/* This group is being expired. Save the context */ |
9a7f38c42
|
1431 1432 |
if (cfqd->workload_expires > now) { cfqg->saved_wl_slice = cfqd->workload_expires - now; |
4d2ceea4c
|
1433 1434 |
cfqg->saved_wl_type = cfqd->serving_wl_type; cfqg->saved_wl_class = cfqd->serving_wl_class; |
dae739ebc
|
1435 |
} else |
4d2ceea4c
|
1436 |
cfqg->saved_wl_slice = 0; |
2868ef7b3
|
1437 1438 1439 |
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, st->min_vdisktime); |
fd16d2631
|
1440 |
cfq_log_cfqq(cfqq->cfqd, cfqq, |
9a7f38c42
|
1441 |
"sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu", |
fd16d2631
|
1442 1443 |
used_sl, cfqq->slice_dispatch, charge, iops_mode(cfqd), cfqq->nr_sectors); |
155fead9b
|
1444 1445 |
cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl); cfqg_stats_set_start_empty_time(cfqg); |
1fa8f6d68
|
1446 |
} |
f51b802c1
|
1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 |
/** * cfq_init_cfqg_base - initialize base part of a cfq_group * @cfqg: cfq_group to initialize * * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED * is enabled or not. */ static void cfq_init_cfqg_base(struct cfq_group *cfqg) { struct cfq_rb_root *st; int i, j; for_each_cfqg_st(cfqg, i, j, st) *st = CFQ_RB_ROOT; RB_CLEAR_NODE(&cfqg->rb_node); |
9a7f38c42
|
1462 |
cfqg->ttime.last_end_request = ktime_get_ns(); |
f51b802c1
|
1463 |
} |
25fb5169d
|
1464 |
#ifdef CONFIG_CFQ_GROUP_IOSCHED |
69d7fde59
|
1465 1466 |
static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val, bool on_dfl, bool reset_dev, bool is_leaf_weight); |
24bdb8ef0
|
1467 |
static void cfqg_stats_exit(struct cfqg_stats *stats) |
90d3839b9
|
1468 |
{ |
24bdb8ef0
|
1469 1470 1471 1472 |
blkg_rwstat_exit(&stats->merged); blkg_rwstat_exit(&stats->service_time); blkg_rwstat_exit(&stats->wait_time); blkg_rwstat_exit(&stats->queued); |
24bdb8ef0
|
1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 |
blkg_stat_exit(&stats->time); #ifdef CONFIG_DEBUG_BLK_CGROUP blkg_stat_exit(&stats->unaccounted_time); blkg_stat_exit(&stats->avg_queue_size_sum); blkg_stat_exit(&stats->avg_queue_size_samples); blkg_stat_exit(&stats->dequeue); blkg_stat_exit(&stats->group_wait_time); blkg_stat_exit(&stats->idle_time); blkg_stat_exit(&stats->empty_time); #endif } static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp) { |
77ea73388
|
1487 |
if (blkg_rwstat_init(&stats->merged, gfp) || |
24bdb8ef0
|
1488 1489 1490 |
blkg_rwstat_init(&stats->service_time, gfp) || blkg_rwstat_init(&stats->wait_time, gfp) || blkg_rwstat_init(&stats->queued, gfp) || |
24bdb8ef0
|
1491 1492 |
blkg_stat_init(&stats->time, gfp)) goto err; |
90d3839b9
|
1493 1494 |
#ifdef CONFIG_DEBUG_BLK_CGROUP |
24bdb8ef0
|
1495 1496 1497 1498 1499 1500 1501 1502 |
if (blkg_stat_init(&stats->unaccounted_time, gfp) || blkg_stat_init(&stats->avg_queue_size_sum, gfp) || blkg_stat_init(&stats->avg_queue_size_samples, gfp) || blkg_stat_init(&stats->dequeue, gfp) || blkg_stat_init(&stats->group_wait_time, gfp) || blkg_stat_init(&stats->idle_time, gfp) || blkg_stat_init(&stats->empty_time, gfp)) goto err; |
90d3839b9
|
1503 |
#endif |
24bdb8ef0
|
1504 1505 1506 1507 |
return 0; err: cfqg_stats_exit(stats); return -ENOMEM; |
90d3839b9
|
1508 |
} |
e4a9bde95
|
1509 1510 1511 |
static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp) { struct cfq_group_data *cgd; |
f57d87100
|
1512 |
cgd = kzalloc(sizeof(*cgd), gfp); |
e4a9bde95
|
1513 1514 1515 1516 |
if (!cgd) return NULL; return &cgd->cpd; } |
814376483
|
1517 |
static void cfq_cpd_init(struct blkcg_policy_data *cpd) |
e48453c38
|
1518 |
{ |
814376483
|
1519 |
struct cfq_group_data *cgd = cpd_to_cfqgd(cpd); |
9e10a130d
|
1520 |
unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ? |
69d7fde59
|
1521 |
CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL; |
e48453c38
|
1522 |
|
69d7fde59
|
1523 1524 1525 1526 1527 |
if (cpd_to_blkcg(cpd) == &blkcg_root) weight *= 2; cgd->weight = weight; cgd->leaf_weight = weight; |
e48453c38
|
1528 |
} |
e4a9bde95
|
1529 1530 1531 1532 |
static void cfq_cpd_free(struct blkcg_policy_data *cpd) { kfree(cpd_to_cfqgd(cpd)); } |
69d7fde59
|
1533 1534 1535 |
static void cfq_cpd_bind(struct blkcg_policy_data *cpd) { struct blkcg *blkcg = cpd_to_blkcg(cpd); |
9e10a130d
|
1536 |
bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys); |
69d7fde59
|
1537 1538 1539 1540 1541 1542 1543 1544 |
unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL; if (blkcg == &blkcg_root) weight *= 2; WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false)); WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true)); } |
001bea73e
|
1545 1546 |
static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node) { |
b2ce2643c
|
1547 1548 1549 1550 1551 1552 1553 |
struct cfq_group *cfqg; cfqg = kzalloc_node(sizeof(*cfqg), gfp, node); if (!cfqg) return NULL; cfq_init_cfqg_base(cfqg); |
24bdb8ef0
|
1554 1555 1556 1557 |
if (cfqg_stats_init(&cfqg->stats, gfp)) { kfree(cfqg); return NULL; } |
b2ce2643c
|
1558 1559 |
return &cfqg->pd; |
001bea73e
|
1560 |
} |
a9520cd6f
|
1561 |
static void cfq_pd_init(struct blkg_policy_data *pd) |
f469a7b4d
|
1562 |
{ |
a9520cd6f
|
1563 1564 |
struct cfq_group *cfqg = pd_to_cfqg(pd); struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg); |
25fb5169d
|
1565 |
|
e48453c38
|
1566 1567 |
cfqg->weight = cgd->weight; cfqg->leaf_weight = cgd->leaf_weight; |
25fb5169d
|
1568 |
} |
a9520cd6f
|
1569 |
static void cfq_pd_offline(struct blkg_policy_data *pd) |
0b39920b5
|
1570 |
{ |
a9520cd6f
|
1571 |
struct cfq_group *cfqg = pd_to_cfqg(pd); |
60a837077
|
1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 |
int i; for (i = 0; i < IOPRIO_BE_NR; i++) { if (cfqg->async_cfqq[0][i]) cfq_put_queue(cfqg->async_cfqq[0][i]); if (cfqg->async_cfqq[1][i]) cfq_put_queue(cfqg->async_cfqq[1][i]); } if (cfqg->async_idle_cfqq) cfq_put_queue(cfqg->async_idle_cfqq); |
0b39920b5
|
1583 1584 1585 1586 1587 1588 |
/* * @blkg is going offline and will be ignored by * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so * that they don't get lost. If IOs complete after this point, the * stats for them will be lost. Oh well... */ |
60a837077
|
1589 |
cfqg_stats_xfer_dead(cfqg); |
0b39920b5
|
1590 |
} |
001bea73e
|
1591 1592 |
static void cfq_pd_free(struct blkg_policy_data *pd) { |
24bdb8ef0
|
1593 1594 1595 1596 |
struct cfq_group *cfqg = pd_to_cfqg(pd); cfqg_stats_exit(&cfqg->stats); kfree(cfqg); |
001bea73e
|
1597 |
} |
a9520cd6f
|
1598 |
static void cfq_pd_reset_stats(struct blkg_policy_data *pd) |
689665af4
|
1599 |
{ |
a9520cd6f
|
1600 |
struct cfq_group *cfqg = pd_to_cfqg(pd); |
689665af4
|
1601 1602 |
cfqg_stats_reset(&cfqg->stats); |
25fb5169d
|
1603 |
} |
ae1188963
|
1604 1605 |
static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd, struct blkcg *blkcg) |
25fb5169d
|
1606 |
{ |
ae1188963
|
1607 |
struct blkcg_gq *blkg; |
f469a7b4d
|
1608 |
|
ae1188963
|
1609 1610 1611 1612 |
blkg = blkg_lookup(blkcg, cfqd->queue); if (likely(blkg)) return blkg_to_cfqg(blkg); return NULL; |
25fb5169d
|
1613 1614 1615 1616 |
} static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { |
25fb5169d
|
1617 |
cfqq->cfqg = cfqg; |
b1c357696
|
1618 |
/* cfqq reference on cfqg */ |
eb7d8c07f
|
1619 |
cfqg_get(cfqg); |
b1c357696
|
1620 |
} |
f95a04afa
|
1621 1622 |
static u64 cfqg_prfill_weight_device(struct seq_file *sf, struct blkg_policy_data *pd, int off) |
60c2bc2d5
|
1623 |
{ |
f95a04afa
|
1624 |
struct cfq_group *cfqg = pd_to_cfqg(pd); |
3381cb8d2
|
1625 1626 |
if (!cfqg->dev_weight) |
60c2bc2d5
|
1627 |
return 0; |
f95a04afa
|
1628 |
return __blkg_prfill_u64(sf, pd, cfqg->dev_weight); |
60c2bc2d5
|
1629 |
} |
2da8ca822
|
1630 |
static int cfqg_print_weight_device(struct seq_file *sf, void *v) |
60c2bc2d5
|
1631 |
{ |
2da8ca822
|
1632 1633 1634 |
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), cfqg_prfill_weight_device, &blkcg_policy_cfq, 0, false); |
60c2bc2d5
|
1635 1636 |
return 0; } |
e71357e11
|
1637 1638 1639 1640 1641 1642 1643 1644 1645 |
static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct cfq_group *cfqg = pd_to_cfqg(pd); if (!cfqg->dev_leaf_weight) return 0; return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight); } |
2da8ca822
|
1646 |
static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v) |
e71357e11
|
1647 |
{ |
2da8ca822
|
1648 1649 1650 |
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0, false); |
e71357e11
|
1651 1652 |
return 0; } |
2da8ca822
|
1653 |
static int cfq_print_weight(struct seq_file *sf, void *v) |
60c2bc2d5
|
1654 |
{ |
e48453c38
|
1655 |
struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); |
9470e4a69
|
1656 1657 |
struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg); unsigned int val = 0; |
e48453c38
|
1658 |
|
9470e4a69
|
1659 1660 1661 1662 1663 |
if (cgd) val = cgd->weight; seq_printf(sf, "%u\n", val); |
60c2bc2d5
|
1664 1665 |
return 0; } |
2da8ca822
|
1666 |
static int cfq_print_leaf_weight(struct seq_file *sf, void *v) |
e71357e11
|
1667 |
{ |
e48453c38
|
1668 |
struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); |
9470e4a69
|
1669 1670 1671 1672 1673 |
struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg); unsigned int val = 0; if (cgd) val = cgd->leaf_weight; |
e48453c38
|
1674 |
|
9470e4a69
|
1675 1676 |
seq_printf(sf, "%u\n", val); |
e71357e11
|
1677 1678 |
return 0; } |
451af504d
|
1679 1680 |
static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off, |
2ee867dcf
|
1681 |
bool on_dfl, bool is_leaf_weight) |
60c2bc2d5
|
1682 |
{ |
69d7fde59
|
1683 1684 |
unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN; unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX; |
451af504d
|
1685 |
struct blkcg *blkcg = css_to_blkcg(of_css(of)); |
60c2bc2d5
|
1686 |
struct blkg_conf_ctx ctx; |
3381cb8d2
|
1687 |
struct cfq_group *cfqg; |
e48453c38
|
1688 |
struct cfq_group_data *cfqgd; |
60c2bc2d5
|
1689 |
int ret; |
36aa9e5f5
|
1690 |
u64 v; |
60c2bc2d5
|
1691 |
|
3c798398e
|
1692 |
ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx); |
60c2bc2d5
|
1693 1694 |
if (ret) return ret; |
2ee867dcf
|
1695 1696 1697 1698 1699 1700 1701 1702 1703 |
if (sscanf(ctx.body, "%llu", &v) == 1) { /* require "default" on dfl */ ret = -ERANGE; if (!v && on_dfl) goto out_finish; } else if (!strcmp(strim(ctx.body), "default")) { v = 0; } else { ret = -EINVAL; |
36aa9e5f5
|
1704 |
goto out_finish; |
2ee867dcf
|
1705 |
} |
36aa9e5f5
|
1706 |
|
3381cb8d2
|
1707 |
cfqg = blkg_to_cfqg(ctx.blkg); |
e48453c38
|
1708 |
cfqgd = blkcg_to_cfqgd(blkcg); |
ae994ea97
|
1709 |
|
20386ce01
|
1710 |
ret = -ERANGE; |
69d7fde59
|
1711 |
if (!v || (v >= min && v <= max)) { |
e71357e11
|
1712 |
if (!is_leaf_weight) { |
36aa9e5f5
|
1713 1714 |
cfqg->dev_weight = v; cfqg->new_weight = v ?: cfqgd->weight; |
e71357e11
|
1715 |
} else { |
36aa9e5f5
|
1716 1717 |
cfqg->dev_leaf_weight = v; cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight; |
e71357e11
|
1718 |
} |
60c2bc2d5
|
1719 1720 |
ret = 0; } |
36aa9e5f5
|
1721 |
out_finish: |
60c2bc2d5
|
1722 |
blkg_conf_finish(&ctx); |
451af504d
|
1723 |
return ret ?: nbytes; |
60c2bc2d5
|
1724 |
} |
451af504d
|
1725 1726 |
static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) |
e71357e11
|
1727 |
{ |
2ee867dcf
|
1728 |
return __cfqg_set_weight_device(of, buf, nbytes, off, false, false); |
e71357e11
|
1729 |
} |
451af504d
|
1730 1731 |
static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) |
e71357e11
|
1732 |
{ |
2ee867dcf
|
1733 |
return __cfqg_set_weight_device(of, buf, nbytes, off, false, true); |
e71357e11
|
1734 |
} |
dd165eb3b
|
1735 |
static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val, |
69d7fde59
|
1736 |
bool on_dfl, bool reset_dev, bool is_leaf_weight) |
60c2bc2d5
|
1737 |
{ |
69d7fde59
|
1738 1739 |
unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN; unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX; |
182446d08
|
1740 |
struct blkcg *blkcg = css_to_blkcg(css); |
3c798398e
|
1741 |
struct blkcg_gq *blkg; |
e48453c38
|
1742 |
struct cfq_group_data *cfqgd; |
ae994ea97
|
1743 |
int ret = 0; |
60c2bc2d5
|
1744 |
|
69d7fde59
|
1745 1746 |
if (val < min || val > max) return -ERANGE; |
60c2bc2d5
|
1747 1748 |
spin_lock_irq(&blkcg->lock); |
e48453c38
|
1749 |
cfqgd = blkcg_to_cfqgd(blkcg); |
ae994ea97
|
1750 1751 1752 1753 |
if (!cfqgd) { ret = -EINVAL; goto out; } |
e71357e11
|
1754 1755 |
if (!is_leaf_weight) |
e48453c38
|
1756 |
cfqgd->weight = val; |
e71357e11
|
1757 |
else |
e48453c38
|
1758 |
cfqgd->leaf_weight = val; |
60c2bc2d5
|
1759 |
|
b67bfe0d4
|
1760 |
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { |
3381cb8d2
|
1761 |
struct cfq_group *cfqg = blkg_to_cfqg(blkg); |
60c2bc2d5
|
1762 |
|
e71357e11
|
1763 1764 1765 1766 |
if (!cfqg) continue; if (!is_leaf_weight) { |
69d7fde59
|
1767 1768 |
if (reset_dev) cfqg->dev_weight = 0; |
e71357e11
|
1769 |
if (!cfqg->dev_weight) |
e48453c38
|
1770 |
cfqg->new_weight = cfqgd->weight; |
e71357e11
|
1771 |
} else { |
69d7fde59
|
1772 1773 |
if (reset_dev) cfqg->dev_leaf_weight = 0; |
e71357e11
|
1774 |
if (!cfqg->dev_leaf_weight) |
e48453c38
|
1775 |
cfqg->new_leaf_weight = cfqgd->leaf_weight; |
e71357e11
|
1776 |
} |
60c2bc2d5
|
1777 |
} |
ae994ea97
|
1778 |
out: |
60c2bc2d5
|
1779 |
spin_unlock_irq(&blkcg->lock); |
ae994ea97
|
1780 |
return ret; |
60c2bc2d5
|
1781 |
} |
182446d08
|
1782 1783 |
static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft, u64 val) |
e71357e11
|
1784 |
{ |
69d7fde59
|
1785 |
return __cfq_set_weight(css, val, false, false, false); |
e71357e11
|
1786 |
} |
182446d08
|
1787 1788 |
static int cfq_set_leaf_weight(struct cgroup_subsys_state *css, struct cftype *cft, u64 val) |
e71357e11
|
1789 |
{ |
69d7fde59
|
1790 |
return __cfq_set_weight(css, val, false, false, true); |
e71357e11
|
1791 |
} |
2da8ca822
|
1792 |
static int cfqg_print_stat(struct seq_file *sf, void *v) |
5bc4afb1e
|
1793 |
{ |
2da8ca822
|
1794 1795 |
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, &blkcg_policy_cfq, seq_cft(sf)->private, false); |
5bc4afb1e
|
1796 1797 |
return 0; } |
2da8ca822
|
1798 |
static int cfqg_print_rwstat(struct seq_file *sf, void *v) |
5bc4afb1e
|
1799 |
{ |
2da8ca822
|
1800 1801 |
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, &blkcg_policy_cfq, seq_cft(sf)->private, true); |
5bc4afb1e
|
1802 1803 |
return 0; } |
43114018c
|
1804 1805 1806 |
static u64 cfqg_prfill_stat_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { |
f12c74cab
|
1807 1808 |
u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_cfq, off); |
43114018c
|
1809 1810 1811 1812 1813 1814 |
return __blkg_prfill_u64(sf, pd, sum); } static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { |
f12c74cab
|
1815 1816 |
struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_cfq, off); |
43114018c
|
1817 1818 |
return __blkg_prfill_rwstat(sf, pd, &sum); } |
2da8ca822
|
1819 |
static int cfqg_print_stat_recursive(struct seq_file *sf, void *v) |
43114018c
|
1820 |
{ |
2da8ca822
|
1821 1822 1823 |
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), cfqg_prfill_stat_recursive, &blkcg_policy_cfq, seq_cft(sf)->private, false); |
43114018c
|
1824 1825 |
return 0; } |
2da8ca822
|
1826 |
static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v) |
43114018c
|
1827 |
{ |
2da8ca822
|
1828 1829 1830 |
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq, seq_cft(sf)->private, true); |
43114018c
|
1831 1832 |
return 0; } |
702747cab
|
1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 |
static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd, int off) { u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes); return __blkg_prfill_u64(sf, pd, sum >> 9); } static int cfqg_print_stat_sectors(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false); return 0; } static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL, offsetof(struct blkcg_gq, stat_bytes)); u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); return __blkg_prfill_u64(sf, pd, sum >> 9); } static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0, false); return 0; } |
60c2bc2d5
|
1866 |
#ifdef CONFIG_DEBUG_BLK_CGROUP |
f95a04afa
|
1867 1868 |
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, struct blkg_policy_data *pd, int off) |
60c2bc2d5
|
1869 |
{ |
f95a04afa
|
1870 |
struct cfq_group *cfqg = pd_to_cfqg(pd); |
155fead9b
|
1871 |
u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples); |
60c2bc2d5
|
1872 1873 1874 |
u64 v = 0; if (samples) { |
155fead9b
|
1875 |
v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum); |
f3cff25f0
|
1876 |
v = div64_u64(v, samples); |
60c2bc2d5
|
1877 |
} |
f95a04afa
|
1878 |
__blkg_prfill_u64(sf, pd, v); |
60c2bc2d5
|
1879 1880 1881 1882 |
return 0; } /* print avg_queue_size */ |
2da8ca822
|
1883 |
static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v) |
60c2bc2d5
|
1884 |
{ |
2da8ca822
|
1885 1886 1887 |
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), cfqg_prfill_avg_queue_size, &blkcg_policy_cfq, 0, false); |
60c2bc2d5
|
1888 1889 1890 |
return 0; } #endif /* CONFIG_DEBUG_BLK_CGROUP */ |
880f50e22
|
1891 |
static struct cftype cfq_blkcg_legacy_files[] = { |
1d3650f71
|
1892 |
/* on root, weight is mapped to leaf_weight */ |
60c2bc2d5
|
1893 1894 |
{ .name = "weight_device", |
1d3650f71
|
1895 |
.flags = CFTYPE_ONLY_ON_ROOT, |
2da8ca822
|
1896 |
.seq_show = cfqg_print_leaf_weight_device, |
451af504d
|
1897 |
.write = cfqg_set_leaf_weight_device, |
60c2bc2d5
|
1898 1899 1900 |
}, { .name = "weight", |
1d3650f71
|
1901 |
.flags = CFTYPE_ONLY_ON_ROOT, |
2da8ca822
|
1902 |
.seq_show = cfq_print_leaf_weight, |
1d3650f71
|
1903 |
.write_u64 = cfq_set_leaf_weight, |
60c2bc2d5
|
1904 |
}, |
e71357e11
|
1905 |
|
1d3650f71
|
1906 |
/* no such mapping necessary for !roots */ |
60c2bc2d5
|
1907 1908 |
{ .name = "weight_device", |
1d3650f71
|
1909 |
.flags = CFTYPE_NOT_ON_ROOT, |
2da8ca822
|
1910 |
.seq_show = cfqg_print_weight_device, |
451af504d
|
1911 |
.write = cfqg_set_weight_device, |
60c2bc2d5
|
1912 1913 1914 |
}, { .name = "weight", |
1d3650f71
|
1915 |
.flags = CFTYPE_NOT_ON_ROOT, |
2da8ca822
|
1916 |
.seq_show = cfq_print_weight, |
3381cb8d2
|
1917 |
.write_u64 = cfq_set_weight, |
60c2bc2d5
|
1918 |
}, |
e71357e11
|
1919 |
|
e71357e11
|
1920 1921 |
{ .name = "leaf_weight_device", |
2da8ca822
|
1922 |
.seq_show = cfqg_print_leaf_weight_device, |
451af504d
|
1923 |
.write = cfqg_set_leaf_weight_device, |
e71357e11
|
1924 1925 1926 |
}, { .name = "leaf_weight", |
2da8ca822
|
1927 |
.seq_show = cfq_print_leaf_weight, |
e71357e11
|
1928 1929 |
.write_u64 = cfq_set_leaf_weight, }, |
43114018c
|
1930 |
/* statistics that cover only the tasks in the cfqg */ |
60c2bc2d5
|
1931 1932 |
{ .name = "time", |
5bc4afb1e
|
1933 |
.private = offsetof(struct cfq_group, stats.time), |
2da8ca822
|
1934 |
.seq_show = cfqg_print_stat, |
60c2bc2d5
|
1935 1936 1937 |
}, { .name = "sectors", |
702747cab
|
1938 |
.seq_show = cfqg_print_stat_sectors, |
60c2bc2d5
|
1939 1940 1941 |
}, { .name = "io_service_bytes", |
77ea73388
|
1942 1943 |
.private = (unsigned long)&blkcg_policy_cfq, .seq_show = blkg_print_stat_bytes, |
60c2bc2d5
|
1944 1945 1946 |
}, { .name = "io_serviced", |
77ea73388
|
1947 1948 |
.private = (unsigned long)&blkcg_policy_cfq, .seq_show = blkg_print_stat_ios, |
60c2bc2d5
|
1949 1950 1951 |
}, { .name = "io_service_time", |
5bc4afb1e
|
1952 |
.private = offsetof(struct cfq_group, stats.service_time), |
2da8ca822
|
1953 |
.seq_show = cfqg_print_rwstat, |
60c2bc2d5
|
1954 1955 1956 |
}, { .name = "io_wait_time", |
5bc4afb1e
|
1957 |
.private = offsetof(struct cfq_group, stats.wait_time), |
2da8ca822
|
1958 |
.seq_show = cfqg_print_rwstat, |
60c2bc2d5
|
1959 1960 1961 |
}, { .name = "io_merged", |
5bc4afb1e
|
1962 |
.private = offsetof(struct cfq_group, stats.merged), |
2da8ca822
|
1963 |
.seq_show = cfqg_print_rwstat, |
60c2bc2d5
|
1964 1965 1966 |
}, { .name = "io_queued", |
5bc4afb1e
|
1967 |
.private = offsetof(struct cfq_group, stats.queued), |
2da8ca822
|
1968 |
.seq_show = cfqg_print_rwstat, |
60c2bc2d5
|
1969 |
}, |
43114018c
|
1970 1971 1972 1973 1974 |
/* the same statistics which cover the cfqg and its descendants */ { .name = "time_recursive", .private = offsetof(struct cfq_group, stats.time), |
2da8ca822
|
1975 |
.seq_show = cfqg_print_stat_recursive, |
43114018c
|
1976 1977 1978 |
}, { .name = "sectors_recursive", |
702747cab
|
1979 |
.seq_show = cfqg_print_stat_sectors_recursive, |
43114018c
|
1980 1981 1982 |
}, { .name = "io_service_bytes_recursive", |
77ea73388
|
1983 1984 |
.private = (unsigned long)&blkcg_policy_cfq, .seq_show = blkg_print_stat_bytes_recursive, |
43114018c
|
1985 1986 1987 |
}, { .name = "io_serviced_recursive", |
77ea73388
|
1988 1989 |
.private = (unsigned long)&blkcg_policy_cfq, .seq_show = blkg_print_stat_ios_recursive, |
43114018c
|
1990 1991 1992 1993 |
}, { .name = "io_service_time_recursive", .private = offsetof(struct cfq_group, stats.service_time), |
2da8ca822
|
1994 |
.seq_show = cfqg_print_rwstat_recursive, |
43114018c
|
1995 1996 1997 1998 |
}, { .name = "io_wait_time_recursive", .private = offsetof(struct cfq_group, stats.wait_time), |
2da8ca822
|
1999 |
.seq_show = cfqg_print_rwstat_recursive, |
43114018c
|
2000 2001 2002 2003 |
}, { .name = "io_merged_recursive", .private = offsetof(struct cfq_group, stats.merged), |
2da8ca822
|
2004 |
.seq_show = cfqg_print_rwstat_recursive, |
43114018c
|
2005 2006 2007 2008 |
}, { .name = "io_queued_recursive", .private = offsetof(struct cfq_group, stats.queued), |
2da8ca822
|
2009 |
.seq_show = cfqg_print_rwstat_recursive, |
43114018c
|
2010 |
}, |
60c2bc2d5
|
2011 2012 2013 |
#ifdef CONFIG_DEBUG_BLK_CGROUP { .name = "avg_queue_size", |
2da8ca822
|
2014 |
.seq_show = cfqg_print_avg_queue_size, |
60c2bc2d5
|
2015 2016 2017 |
}, { .name = "group_wait_time", |
5bc4afb1e
|
2018 |
.private = offsetof(struct cfq_group, stats.group_wait_time), |
2da8ca822
|
2019 |
.seq_show = cfqg_print_stat, |
60c2bc2d5
|
2020 2021 2022 |
}, { .name = "idle_time", |
5bc4afb1e
|
2023 |
.private = offsetof(struct cfq_group, stats.idle_time), |
2da8ca822
|
2024 |
.seq_show = cfqg_print_stat, |
60c2bc2d5
|
2025 2026 2027 |
}, { .name = "empty_time", |
5bc4afb1e
|
2028 |
.private = offsetof(struct cfq_group, stats.empty_time), |
2da8ca822
|
2029 |
.seq_show = cfqg_print_stat, |
60c2bc2d5
|
2030 2031 2032 |
}, { .name = "dequeue", |
5bc4afb1e
|
2033 |
.private = offsetof(struct cfq_group, stats.dequeue), |
2da8ca822
|
2034 |
.seq_show = cfqg_print_stat, |
60c2bc2d5
|
2035 2036 2037 |
}, { .name = "unaccounted_time", |
5bc4afb1e
|
2038 |
.private = offsetof(struct cfq_group, stats.unaccounted_time), |
2da8ca822
|
2039 |
.seq_show = cfqg_print_stat, |
60c2bc2d5
|
2040 2041 2042 2043 |
}, #endif /* CONFIG_DEBUG_BLK_CGROUP */ { } /* terminate */ }; |
2ee867dcf
|
2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 |
static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v) { struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg); seq_printf(sf, "default %u\n", cgd->weight); blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device, &blkcg_policy_cfq, 0, false); return 0; } static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { char *endp; int ret; u64 v; buf = strim(buf); /* "WEIGHT" or "default WEIGHT" sets the default weight */ v = simple_strtoull(buf, &endp, 0); if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) { |
69d7fde59
|
2069 |
ret = __cfq_set_weight(of_css(of), v, true, false, false); |
2ee867dcf
|
2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 |
return ret ?: nbytes; } /* "MAJ:MIN WEIGHT" */ return __cfqg_set_weight_device(of, buf, nbytes, off, true, false); } static struct cftype cfq_blkcg_files[] = { { .name = "weight", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cfq_print_weight_on_dfl, .write = cfq_set_weight_on_dfl, }, { } /* terminate */ }; |
25fb5169d
|
2086 |
#else /* GROUP_IOSCHED */ |
ae1188963
|
2087 2088 |
static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd, struct blkcg *blkcg) |
25fb5169d
|
2089 |
{ |
f51b802c1
|
2090 |
return cfqd->root_group; |
25fb5169d
|
2091 |
} |
7f1dc8a2d
|
2092 |
|
25fb5169d
|
2093 2094 2095 2096 2097 2098 |
static inline void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { cfqq->cfqg = cfqg; } #endif /* GROUP_IOSCHED */ |
498d3aa2b
|
2099 |
/* |
c0324a020
|
2100 |
* The cfqd->service_trees holds all pending cfq_queue's that have |
498d3aa2b
|
2101 2102 2103 |
* requests waiting to be processed. It is sorted in the order that * we will service the queues. */ |
a36e71f99
|
2104 |
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
a6151c3a5
|
2105 |
bool add_front) |
d9e7620e6
|
2106 |
{ |
0871714e0
|
2107 2108 |
struct rb_node **p, *parent; struct cfq_queue *__cfqq; |
9a7f38c42
|
2109 |
u64 rb_key; |
34b98d03b
|
2110 |
struct cfq_rb_root *st; |
498d3aa2b
|
2111 |
int left; |
dae739ebc
|
2112 |
int new_cfqq = 1; |
9a7f38c42
|
2113 |
u64 now = ktime_get_ns(); |
ae30c2865
|
2114 |
|
34b98d03b
|
2115 |
st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); |
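/* each group keeps one service tree per (workload class, type) pair; pick the tree this queue belongs on */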
0871714e0
|
2116 2117 |
if (cfq_class_idle(cfqq)) { rb_key = CFQ_IDLE_DELAY; |
34b98d03b
|
2118 |
parent = rb_last(&st->rb); |
0871714e0
|
2119 2120 2121 2122 |
if (parent && parent != &cfqq->rb_node) { __cfqq = rb_entry(parent, struct cfq_queue, rb_node); rb_key += __cfqq->rb_key; } else |
9a7f38c42
|
2123 |
rb_key += now; |
0871714e0
|
2124 |
} else if (!add_front) { |
b9c8946b1
|
2125 2126 2127 2128 2129 2130 |
/* * Get our rb key offset. Subtract any residual slice * value carried from last service. A negative resid * count indicates slice overrun, and this should position * the next service time further away in the tree. */ |
9a7f38c42
|
2131 |
rb_key = cfq_slice_offset(cfqd, cfqq) + now; |
b9c8946b1
|
2132 |
rb_key -= cfqq->slice_resid; |
edd75ffd9
|
2133 |
cfqq->slice_resid = 0; |
48e025e63
|
2134 |
} else { |
9a7f38c42
|
2135 |
rb_key = -NSEC_PER_SEC; |
34b98d03b
|
2136 |
__cfqq = cfq_rb_first(st); |
9a7f38c42
|
2137 |
rb_key += __cfqq ? __cfqq->rb_key : now; |
48e025e63
|
2138 |
} |
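/* add_front: a key one second smaller than the current leftmost entry (or than "now") puts this queue at the head of the tree */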
1da177e4c
|
2139 |
|
d9e7620e6
|
2140 |
if (!RB_EMPTY_NODE(&cfqq->rb_node)) { |
dae739ebc
|
2141 |
new_cfqq = 0; |
99f9628ab
|
2142 |
/* |
d9e7620e6
|
2143 |
* same position, nothing more to do |
99f9628ab
|
2144 |
*/ |
34b98d03b
|
2145 |
if (rb_key == cfqq->rb_key && cfqq->service_tree == st) |
d9e7620e6
|
2146 |
return; |
1da177e4c
|
2147 |
|
aa6f6a3de
|
2148 2149 |
cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); cfqq->service_tree = NULL; |
1da177e4c
|
2150 |
} |
d9e7620e6
|
2151 |
|
498d3aa2b
|
2152 |
left = 1; |
0871714e0
|
2153 |
parent = NULL; |
34b98d03b
|
2154 2155 |
cfqq->service_tree = st; p = &st->rb.rb_node; |
d9e7620e6
|
2156 2157 2158 |
while (*p) { parent = *p; __cfqq = rb_entry(parent, struct cfq_queue, rb_node); |
0c534e0a4
|
2159 |
/* |
c0324a020
|
2160 |
* sort by key, which represents service time. |
0c534e0a4
|
2161 |
*/ |
9a7f38c42
|
2162 |
if (rb_key < __cfqq->rb_key) |
1f23f1215
|
2163 |
p = &parent->rb_left; |
c0324a020
|
2164 |
else { |
1f23f1215
|
2165 |
p = &parent->rb_right; |
cc09e2990
|
2166 |
left = 0; |
c0324a020
|
2167 |
} |
d9e7620e6
|
2168 |
} |
cc09e2990
|
2169 |
if (left) |
34b98d03b
|
2170 |
st->left = &cfqq->rb_node; |
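/* cache the new leftmost node so cfq_rb_first() can return it in O(1) */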
cc09e2990
|
2171 |
|
d9e7620e6
|
2172 2173 |
cfqq->rb_key = rb_key; rb_link_node(&cfqq->rb_node, parent, p); |
34b98d03b
|
2174 2175 |
rb_insert_color(&cfqq->rb_node, &st->rb); st->count++; |
20359f27e
|
2176 |
if (add_front || !new_cfqq) |
dae739ebc
|
2177 |
return; |
8184f93ec
|
2178 |
cfq_group_notify_queue_add(cfqd, cfqq->cfqg); |
1da177e4c
|
2179 |
} |
a36e71f99
|
2180 |
static struct cfq_queue * |
f2d1f0ae7
|
2181 2182 2183 |
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, sector_t sector, struct rb_node **ret_parent, struct rb_node ***rb_link) |
a36e71f99
|
2184 |
{ |
a36e71f99
|
2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 |
struct rb_node **p, *parent; struct cfq_queue *cfqq = NULL; parent = NULL; p = &root->rb_node; while (*p) { struct rb_node **n; parent = *p; cfqq = rb_entry(parent, struct cfq_queue, p_node); /* * Sort strictly based on sector. Smallest to the left, * largest to the right. */ |
2e46e8b27
|
2200 |
if (sector > blk_rq_pos(cfqq->next_rq)) |
a36e71f99
|
2201 |
n = &(*p)->rb_right; |
2e46e8b27
|
2202 |
else if (sector < blk_rq_pos(cfqq->next_rq)) |
a36e71f99
|
2203 2204 2205 2206 |
n = &(*p)->rb_left; else break; p = n; |
3ac6c9f8a
|
2207 |
cfqq = NULL; |
a36e71f99
|
2208 2209 2210 2211 2212 |
} *ret_parent = parent; if (rb_link) *rb_link = p; |
3ac6c9f8a
|
2213 |
return cfqq; |
a36e71f99
|
2214 2215 2216 2217 |
} static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) { |
a36e71f99
|
2218 2219 |
struct rb_node **p, *parent; struct cfq_queue *__cfqq; |
f2d1f0ae7
|
2220 2221 2222 2223 |
if (cfqq->p_root) { rb_erase(&cfqq->p_node, cfqq->p_root); cfqq->p_root = NULL; } |
a36e71f99
|
2224 2225 2226 2227 2228 |
if (cfq_class_idle(cfqq)) return; if (!cfqq->next_rq) return; |
f2d1f0ae7
|
2229 |
cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; |
2e46e8b27
|
2230 2231 |
__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, blk_rq_pos(cfqq->next_rq), &parent, &p); |
3ac6c9f8a
|
2232 2233 |
if (!__cfqq) { rb_link_node(&cfqq->p_node, parent, p); |
f2d1f0ae7
|
2234 2235 2236 |
rb_insert_color(&cfqq->p_node, cfqq->p_root); } else cfqq->p_root = NULL; |
a36e71f99
|
2237 |
} |
498d3aa2b
|
2238 2239 2240 |
/* * Update cfqq's position in the service tree. */ |
edd75ffd9
|
2241 |
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
6d048f531
|
2242 |
{ |
6d048f531
|
2243 2244 2245 |
/* * Resorting requires the cfqq to be on the RR list already. */ |
a36e71f99
|
2246 |
if (cfq_cfqq_on_rr(cfqq)) { |
edd75ffd9
|
2247 |
cfq_service_tree_add(cfqd, cfqq, 0); |
a36e71f99
|
2248 2249 |
cfq_prio_tree_add(cfqd, cfqq); } |
6d048f531
|
2250 |
} |
1da177e4c
|
2251 2252 |
/* * add to busy list of queues for service, trying to be fair in ordering |
22e2c507c
|
2253 |
* the pending list according to last request service |
1da177e4c
|
2254 |
*/ |
febffd618
|
2255 |
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
1da177e4c
|
2256 |
{ |
7b679138b
|
2257 |
cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); |
3b18152c3
|
2258 2259 |
BUG_ON(cfq_cfqq_on_rr(cfqq)); cfq_mark_cfqq_on_rr(cfqq); |
1da177e4c
|
2260 |
cfqd->busy_queues++; |
ef8a41df8
|
2261 2262 |
if (cfq_cfqq_sync(cfqq)) cfqd->busy_sync_queues++; |
1da177e4c
|
2263 |
|
edd75ffd9
|
2264 |
cfq_resort_rr_list(cfqd, cfqq); |
1da177e4c
|
2265 |
} |
498d3aa2b
|
2266 2267 2268 2269 |
/* * Called when the cfqq no longer has requests pending, remove it from * the service tree. */ |
febffd618
|
2270 |
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
1da177e4c
|
2271 |
{ |
7b679138b
|
2272 |
cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); |
3b18152c3
|
2273 2274 |
BUG_ON(!cfq_cfqq_on_rr(cfqq)); cfq_clear_cfqq_on_rr(cfqq); |
1da177e4c
|
2275 |
|
aa6f6a3de
|
2276 2277 2278 2279 |
if (!RB_EMPTY_NODE(&cfqq->rb_node)) { cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); cfqq->service_tree = NULL; } |
f2d1f0ae7
|
2280 2281 2282 2283 |
if (cfqq->p_root) { rb_erase(&cfqq->p_node, cfqq->p_root); cfqq->p_root = NULL; } |
d9e7620e6
|
2284 |
|
8184f93ec
|
2285 |
cfq_group_notify_queue_del(cfqd, cfqq->cfqg); |
1da177e4c
|
2286 2287 |
BUG_ON(!cfqd->busy_queues); cfqd->busy_queues--; |
ef8a41df8
|
2288 2289 |
if (cfq_cfqq_sync(cfqq)) cfqd->busy_sync_queues--; |
1da177e4c
|
2290 2291 2292 2293 2294 |
} /* * rb tree support functions */ |
febffd618
|
2295 |
static void cfq_del_rq_rb(struct request *rq) |
1da177e4c
|
2296 |
{ |
5e7053747
|
2297 |
struct cfq_queue *cfqq = RQ_CFQQ(rq); |
5e7053747
|
2298 |
const int sync = rq_is_sync(rq); |
1da177e4c
|
2299 |
|
b4878f245
|
2300 2301 |
BUG_ON(!cfqq->queued[sync]); cfqq->queued[sync]--; |
1da177e4c
|
2302 |
|
5e7053747
|
2303 |
elv_rb_del(&cfqq->sort_list, rq); |
1da177e4c
|
2304 |
|
f04a64246
|
2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 |
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { /* * Queue will be deleted from service tree when we actually * expire it later. Right now just remove it from prio tree * as it is empty. */ if (cfqq->p_root) { rb_erase(&cfqq->p_node, cfqq->p_root); cfqq->p_root = NULL; } } |
1da177e4c
|
2316 |
} |
5e7053747
|
2317 |
static void cfq_add_rq_rb(struct request *rq) |
1da177e4c
|
2318 |
{ |
5e7053747
|
2319 |
struct cfq_queue *cfqq = RQ_CFQQ(rq); |
1da177e4c
|
2320 |
struct cfq_data *cfqd = cfqq->cfqd; |
796d5116c
|
2321 |
struct request *prev; |
1da177e4c
|
2322 |
|
5380a101d
|
2323 |
cfqq->queued[rq_is_sync(rq)]++; |
1da177e4c
|
2324 |
|
796d5116c
|
2325 |
elv_rb_add(&cfqq->sort_list, rq); |
5fccbf61b
|
2326 2327 2328 |
if (!cfq_cfqq_on_rr(cfqq)) cfq_add_cfqq_rr(cfqd, cfqq); |
5044eed48
|
2329 2330 2331 2332 |
/* * check if this request is a better next-serve candidate */ |
a36e71f99
|
2333 |
prev = cfqq->next_rq; |
cf7c25cf9
|
2334 |
cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); |
a36e71f99
|
2335 2336 2337 2338 2339 2340 |
/* * adjust priority tree position, if ->next_rq changes */ if (prev != cfqq->next_rq) cfq_prio_tree_add(cfqd, cfqq); |
5044eed48
|
2341 |
BUG_ON(!cfqq->next_rq); |
1da177e4c
|
2342 |
} |
febffd618
|
2343 |
static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) |
1da177e4c
|
2344 |
{ |
5380a101d
|
2345 2346 |
elv_rb_del(&cfqq->sort_list, rq); cfqq->queued[rq_is_sync(rq)]--; |
63a4cc248
|
2347 |
cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags); |
5e7053747
|
2348 |
cfq_add_rq_rb(rq); |
155fead9b
|
2349 |
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, |
63a4cc248
|
2350 |
req_op(rq), rq->cmd_flags); |
1da177e4c
|
2351 |
} |
206dc69b3
|
2352 2353 |
static struct request * cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) |
1da177e4c
|
2354 |
{ |
206dc69b3
|
2355 |
struct task_struct *tsk = current; |
c58698073
|
2356 |
struct cfq_io_cq *cic; |
206dc69b3
|
2357 |
struct cfq_queue *cfqq; |
1da177e4c
|
2358 |
|
4ac845a2e
|
2359 |
cic = cfq_cic_lookup(cfqd, tsk->io_context); |
91fac317a
|
2360 2361 2362 2363 |
if (!cic) return NULL; cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); |
f73a1c7d1
|
2364 2365 |
if (cfqq) return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); |
1da177e4c
|
2366 |
|
1da177e4c
|
2367 2368 |
return NULL; } |
165125e1e
|
2369 |
static void cfq_activate_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
2370 |
{ |
22e2c507c
|
2371 |
struct cfq_data *cfqd = q->elevator->elevator_data; |
3b18152c3
|
2372 |
|
53c583d22
|
2373 |
cfqd->rq_in_driver++; |
7b679138b
|
2374 |
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", |
53c583d22
|
2375 |
cfqd->rq_in_driver); |
25776e359
|
2376 |
|
5b93629b4
|
2377 |
cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); |
1da177e4c
|
2378 |
} |
165125e1e
|
2379 |
static void cfq_deactivate_request(struct request_queue *q, struct request *rq) |
1da177e4c
|
2380 |
{ |
b4878f245
|
2381 |
struct cfq_data *cfqd = q->elevator->elevator_data; |
53c583d22
|
2382 2383 |
WARN_ON(!cfqd->rq_in_driver); cfqd->rq_in_driver--; |
7b679138b
|
2384 |
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", |
53c583d22
|
2385 |
cfqd->rq_in_driver); |
1da177e4c
|
2386 |
} |
b4878f245
|
2387 |
static void cfq_remove_request(struct request *rq) |
1da177e4c
|
2388 |
{ |
5e7053747
|
2389 |
struct cfq_queue *cfqq = RQ_CFQQ(rq); |
21183b07e
|
2390 |
|
5e7053747
|
2391 2392 |
if (cfqq->next_rq == rq) cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); |
1da177e4c
|
2393 |
|
b4878f245
|
2394 |
list_del_init(&rq->queuelist); |
5e7053747
|
2395 |
cfq_del_rq_rb(rq); |
374f84ac3
|
2396 |
|
45333d5a3
|
2397 |
cfqq->cfqd->rq_queued--; |
63a4cc248
|
2398 |
cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags); |
65299a3b7
|
2399 2400 2401 |
if (rq->cmd_flags & REQ_PRIO) { WARN_ON(!cfqq->prio_pending); cfqq->prio_pending--; |
b53d1ed73
|
2402 |
} |
1da177e4c
|
2403 |
} |
165125e1e
|
2404 2405 |
static int cfq_merge(struct request_queue *q, struct request **req, struct bio *bio) |
1da177e4c
|
2406 2407 2408 |
{ struct cfq_data *cfqd = q->elevator->elevator_data; struct request *__rq; |
1da177e4c
|
2409 |
|
206dc69b3
|
2410 |
__rq = cfq_find_rq_fmerge(cfqd, bio); |
72ef799b3
|
2411 |
if (__rq && elv_bio_merge_ok(__rq, bio)) { |
9817064b6
|
2412 2413 |
*req = __rq; return ELEVATOR_FRONT_MERGE; |
1da177e4c
|
2414 2415 2416 |
} return ELEVATOR_NO_MERGE; |
1da177e4c
|
2417 |
} |
165125e1e
|
2418 |
static void cfq_merged_request(struct request_queue *q, struct request *req, |
21183b07e
|
2419 |
int type) |
1da177e4c
|
2420 |
{ |
21183b07e
|
2421 |
if (type == ELEVATOR_FRONT_MERGE) { |
5e7053747
|
2422 |
struct cfq_queue *cfqq = RQ_CFQQ(req); |
1da177e4c
|
2423 |
|
5e7053747
|
2424 |
cfq_reposition_rq_rb(cfqq, req); |
1da177e4c
|
2425 |
} |
1da177e4c
|
2426 |
} |
812d40264
|
2427 2428 2429 |
static void cfq_bio_merged(struct request_queue *q, struct request *req, struct bio *bio) { |
1eff9d322
|
2430 |
cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf); |
812d40264
|
2431 |
} |
1da177e4c
|
2432 |
static void |
165125e1e
|
2433 |
cfq_merged_requests(struct request_queue *q, struct request *rq, |
1da177e4c
|
2434 2435 |
struct request *next) { |
cf7c25cf9
|
2436 |
struct cfq_queue *cfqq = RQ_CFQQ(rq); |
4a0b75c7d
|
2437 |
struct cfq_data *cfqd = q->elevator->elevator_data; |
22e2c507c
|
2438 2439 2440 2441 |
/* * reposition in fifo if next is older than rq */ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && |
9a7f38c42
|
2442 |
next->fifo_time < rq->fifo_time && |
3d106fba2
|
2443 |
cfqq == RQ_CFQQ(next)) { |
22e2c507c
|
2444 |
list_move(&rq->queuelist, &next->queuelist); |
8b4922d31
|
2445 |
rq->fifo_time = next->fifo_time; |
30996f40b
|
2446 |
} |
22e2c507c
|
2447 |
|
cf7c25cf9
|
2448 2449 |
if (cfqq->next_rq == next) cfqq->next_rq = rq; |
b4878f245
|
2450 |
cfq_remove_request(next); |
63a4cc248
|
2451 |
cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags); |
4a0b75c7d
|
2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 |
cfqq = RQ_CFQQ(next); /* * all requests of this queue are merged into other queues, delete it * from the service tree. If it's the active_queue, * cfq_dispatch_requests() will choose to expire it or to idle */ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && cfqq != cfqd->active_queue) cfq_del_cfqq_rr(cfqd, cfqq); |
22e2c507c
|
2462 |
} |
72ef799b3
|
2463 2464 |
static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio) |
da7752650
|
2465 2466 |
{ struct cfq_data *cfqd = q->elevator->elevator_data; |
c58698073
|
2467 |
struct cfq_io_cq *cic; |
da7752650
|
2468 |
struct cfq_queue *cfqq; |
da7752650
|
2469 2470 |
/* |
ec8acb690
|
2471 |
* Disallow merge of a sync bio into an async request. |
da7752650
|
2472 |
*/ |
91fac317a
|
2473 |
if (cfq_bio_sync(bio) && !rq_is_sync(rq)) |
a6151c3a5
|
2474 |
return false; |
da7752650
|
2475 2476 |
/* |
f1a4f4d35
|
2477 |
* Lookup the cfqq that this bio will be queued with and allow |
07c2bd373
|
2478 |
* merge only if rq is queued there. |
f1a4f4d35
|
2479 |
*/ |
07c2bd373
|
2480 2481 2482 |
cic = cfq_cic_lookup(cfqd, current->io_context); if (!cic) return false; |
719d34027
|
2483 |
|
91fac317a
|
2484 |
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); |
a6151c3a5
|
2485 |
return cfqq == RQ_CFQQ(rq); |
da7752650
|
2486 |
} |
72ef799b3
|
2487 2488 2489 2490 2491 |
static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq, struct request *next) { return RQ_CFQQ(rq) == RQ_CFQQ(next); } |
812df48d1
|
2492 2493 |
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) { |
911483258
|
2494 |
hrtimer_try_to_cancel(&cfqd->idle_slice_timer); |
155fead9b
|
2495 |
cfqg_stats_update_idle_time(cfqq->cfqg); |
812df48d1
|
2496 |
} |
febffd618
|
2497 2498 |
static void __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
22e2c507c
|
2499 2500 |
{ if (cfqq) { |
3bf10fea3
|
2501 |
cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", |
4d2ceea4c
|
2502 |
cfqd->serving_wl_class, cfqd->serving_wl_type); |
155fead9b
|
2503 |
cfqg_stats_update_avg_queue_size(cfqq->cfqg); |
62a37f6ba
|
2504 |
cfqq->slice_start = 0; |
9a7f38c42
|
2505 |
cfqq->dispatch_start = ktime_get_ns(); |
62a37f6ba
|
2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 |
cfqq->allocated_slice = 0; cfqq->slice_end = 0; cfqq->slice_dispatch = 0; cfqq->nr_sectors = 0; cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_must_alloc_slice(cfqq); cfq_clear_cfqq_fifo_expire(cfqq); cfq_mark_cfqq_slice_new(cfqq); cfq_del_timer(cfqd, cfqq); |
22e2c507c
|
2518 2519 2520 2521 2522 2523 |
} cfqd->active_queue = cfqq; } /* |
7b14e3b52
|
2524 2525 2526 2527 |
* current cfqq expired its slice (or was too idle), select new one */ static void __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
e5ff082e8
|
2528 |
bool timed_out) |
7b14e3b52
|
2529 |
{ |
7b679138b
|
2530 |
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); |
7b14e3b52
|
2531 |
if (cfq_cfqq_wait_request(cfqq)) |
812df48d1
|
2532 |
cfq_del_timer(cfqd, cfqq); |
7b14e3b52
|
2533 |
|
7b14e3b52
|
2534 |
cfq_clear_cfqq_wait_request(cfqq); |
f75edf2dc
|
2535 |
cfq_clear_cfqq_wait_busy(cfqq); |
7b14e3b52
|
2536 2537 |
/* |
ae54abed6
|
2538 2539 2540 2541 2542 2543 2544 2545 2546 |
* If this cfqq is shared between multiple processes, check to * make sure that those processes are still issuing I/Os within * the mean seek distance. If not, it may be time to break the * queues apart again. */ if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) cfq_mark_cfqq_split_coop(cfqq); /* |
6084cdda0
|
2547 |
* store what was left of this slice, if the queue idled/timed out |
7b14e3b52
|
2548 |
*/ |
c553f8e33
|
2549 2550 |
if (timed_out) { if (cfq_cfqq_slice_new(cfqq)) |
ba5bd520f
|
2551 |
cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); |
c553f8e33
|
2552 |
else |
9a7f38c42
|
2553 |
cfqq->slice_resid = cfqq->slice_end - ktime_get_ns(); |
93fdf1478
|
2554 |
cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid); |
7b679138b
|
2555 |
} |
7b14e3b52
|
2556 |
|
e5ff082e8
|
2557 |
cfq_group_served(cfqd, cfqq->cfqg, cfqq); |
dae739ebc
|
2558 |
|
f04a64246
|
2559 2560 |
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_del_cfqq_rr(cfqd, cfqq); |
edd75ffd9
|
2561 |
cfq_resort_rr_list(cfqd, cfqq); |
7b14e3b52
|
2562 2563 2564 2565 2566 |
if (cfqq == cfqd->active_queue) cfqd->active_queue = NULL; if (cfqd->active_cic) { |
11a3122f6
|
2567 |
put_io_context(cfqd->active_cic->icq.ioc); |
7b14e3b52
|
2568 2569 |
cfqd->active_cic = NULL; } |
7b14e3b52
|
2570 |
} |
e5ff082e8
|
2571 |
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) |
7b14e3b52
|
2572 2573 2574 2575 |
{ struct cfq_queue *cfqq = cfqd->active_queue; if (cfqq) |
e5ff082e8
|
2576 |
__cfq_slice_expired(cfqd, cfqq, timed_out); |
7b14e3b52
|
2577 |
} |
498d3aa2b
|
2578 2579 2580 2581 |
/* * Get next queue for service. Unless we have a queue preemption, * we'll simply select the first cfqq in the service tree. */ |
6d048f531
|
2582 |
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) |
22e2c507c
|
2583 |
{ |
34b98d03b
|
2584 2585 |
struct cfq_rb_root *st = st_for(cfqd->serving_group, cfqd->serving_wl_class, cfqd->serving_wl_type); |
d9e7620e6
|
2586 |
|
f04a64246
|
2587 2588 |
if (!cfqd->rq_queued) return NULL; |
1fa8f6d68
|
2589 |
/* There is nothing to dispatch */ |
34b98d03b
|
2590 |
if (!st) |
1fa8f6d68
|
2591 |
return NULL; |
34b98d03b
|
2592 |
if (RB_EMPTY_ROOT(&st->rb)) |
c0324a020
|
2593 |
return NULL; |
34b98d03b
|
2594 |
return cfq_rb_first(st); |
6d048f531
|
2595 |
} |
f04a64246
|
2596 2597 |
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) { |
25fb5169d
|
2598 |
struct cfq_group *cfqg; |
f04a64246
|
2599 2600 2601 2602 2603 2604 |
struct cfq_queue *cfqq; int i, j; struct cfq_rb_root *st; if (!cfqd->rq_queued) return NULL; |
25fb5169d
|
2605 2606 2607 |
cfqg = cfq_get_next_cfqg(cfqd); if (!cfqg) return NULL; |
f04a64246
|
2608 2609 2610 2611 2612 |
for_each_cfqg_st(cfqg, i, j, st) if ((cfqq = cfq_rb_first(st)) != NULL) return cfqq; return NULL; } |
498d3aa2b
|
2613 2614 2615 |
/* * Get and set a new active queue for service. */ |
a36e71f99
|
2616 2617 |
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
6d048f531
|
2618 |
{ |
e00ef7997
|
2619 |
if (!cfqq) |
a36e71f99
|
2620 |
cfqq = cfq_get_next_queue(cfqd); |
6d048f531
|
2621 |
|
22e2c507c
|
2622 |
__cfq_set_active_queue(cfqd, cfqq); |
3b18152c3
|
2623 |
return cfqq; |
22e2c507c
|
2624 |
} |
d9e7620e6
|
2625 2626 2627 |
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, struct request *rq) { |
83096ebf1
|
2628 2629 |
if (blk_rq_pos(rq) >= cfqd->last_position) return blk_rq_pos(rq) - cfqd->last_position; |
d9e7620e6
|
2630 |
else |
83096ebf1
|
2631 |
return cfqd->last_position - blk_rq_pos(rq); |
d9e7620e6
|
2632 |
} |
b2c18e1e0
|
2633 |
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, |
e9ce335df
|
2634 |
struct request *rq) |
6d048f531
|
2635 |
{ |
e9ce335df
|
2636 |
return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; |
6d048f531
|
2637 |
} |
a36e71f99
|
2638 2639 2640 |
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, struct cfq_queue *cur_cfqq) { |
f2d1f0ae7
|
2641 |
struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; |
a36e71f99
|
2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 |
struct rb_node *parent, *node; struct cfq_queue *__cfqq; sector_t sector = cfqd->last_position; if (RB_EMPTY_ROOT(root)) return NULL; /* * First, if we find a request starting at the end of the last * request, choose it. */ |
f2d1f0ae7
|
2653 |
__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); |
a36e71f99
|
2654 2655 2656 2657 2658 2659 2660 2661 |
if (__cfqq) return __cfqq; /* * If the exact sector wasn't found, the parent of the NULL leaf * will contain the closest sector. */ __cfqq = rb_entry(parent, struct cfq_queue, p_node); |
e9ce335df
|
2662 |
if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) |
a36e71f99
|
2663 |
return __cfqq; |
2e46e8b27
|
2664 |
if (blk_rq_pos(__cfqq->next_rq) < sector) |
a36e71f99
|
2665 2666 2667 2668 2669 2670 2671 |
node = rb_next(&__cfqq->p_node); else node = rb_prev(&__cfqq->p_node); if (!node) return NULL; __cfqq = rb_entry(node, struct cfq_queue, p_node); |
e9ce335df
|
2672 |
if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) |
a36e71f99
|
2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 |
return __cfqq; return NULL; } /* * cfqd - obvious * cur_cfqq - passed in so that we don't decide that the current queue is * closely cooperating with itself. * * So, basically we're assuming that cur_cfqq has dispatched at least * one request, and that cfqd->last_position reflects a position on the disk * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid * assumption. */ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, |
b3b6d0408
|
2689 |
struct cfq_queue *cur_cfqq) |
6d048f531
|
2690 |
{ |
a36e71f99
|
2691 |
struct cfq_queue *cfqq; |
39c01b219
|
2692 2693 |
if (cfq_class_idle(cur_cfqq)) return NULL; |
e6c5bc737
|
2694 2695 2696 2697 |
if (!cfq_cfqq_sync(cur_cfqq)) return NULL; if (CFQQ_SEEKY(cur_cfqq)) return NULL; |
a36e71f99
|
2698 |
/* |
b9d8f4c73
|
2699 2700 2701 2702 2703 2704 |
* Don't search priority tree if it's the only queue in the group. */ if (cur_cfqq->cfqg->nr_cfqq == 1) return NULL; /* |
d9e7620e6
|
2705 2706 2707 |
* We should notice if some of the queues are cooperating, e.g. * working closely on the same area of the disk. In that case, * we can group them together and not waste time idling. |
6d048f531
|
2708 |
*/ |
a36e71f99
|
2709 2710 2711 |
cfqq = cfqq_close(cfqd, cur_cfqq); if (!cfqq) return NULL; |
8682e1f15
|
2712 2713 2714 |
/* If new queue belongs to different cfq_group, don't choose it */ if (cur_cfqq->cfqg != cfqq->cfqg) return NULL; |
df5fe3e8e
|
2715 2716 2717 2718 2719 |
/* * It only makes sense to merge sync queues. */ if (!cfq_cfqq_sync(cfqq)) return NULL; |
e6c5bc737
|
2720 2721 |
if (CFQQ_SEEKY(cfqq)) return NULL; |
df5fe3e8e
|
2722 |
|
c0324a020
|
2723 2724 2725 2726 2727 |
/* * Do not merge queues of different priority classes */ if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) return NULL; |
a36e71f99
|
2728 |
return cfqq; |
6d048f531
|
2729 |
} |
a6d44e982
|
2730 2731 2732 2733 2734 2735 |
/* * Determine whether we should enforce idle window for this queue. */ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) { |
3bf10fea3
|
2736 |
enum wl_class_t wl_class = cfqq_class(cfqq); |
34b98d03b
|
2737 |
struct cfq_rb_root *st = cfqq->service_tree; |
a6d44e982
|
2738 |
|
34b98d03b
|
2739 2740 |
BUG_ON(!st); BUG_ON(!st->count); |
f04a64246
|
2741 |
|
b6508c161
|
2742 2743 |
if (!cfqd->cfq_slice_idle) return false; |
a6d44e982
|
2744 |
/* We never do for idle class queues. */ |
3bf10fea3
|
2745 |
if (wl_class == IDLE_WORKLOAD) |
a6d44e982
|
2746 2747 2748 |
return false; /* We do for queues that were marked with idle window flag. */ |
3c764b7a6
|
2749 2750 |
if (cfq_cfqq_idle_window(cfqq) && !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) |
a6d44e982
|
2751 2752 2753 2754 2755 2756 |
return true; /* * Otherwise, we do only if they are the last ones * in their service tree. */ |
34b98d03b
|
2757 2758 |
if (st->count == 1 && cfq_cfqq_sync(cfqq) && !cfq_io_thinktime_big(cfqd, &st->ttime, false)) |
c1e44756f
|
2759 |
return true; |
34b98d03b
|
2760 |
cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); |
c1e44756f
|
2761 |
return false; |
a6d44e982
|
2762 |
} |
6d048f531
|
2763 |
static void cfq_arm_slice_timer(struct cfq_data *cfqd) |
22e2c507c
|
2764 |
{ |
1792669cc
|
2765 |
struct cfq_queue *cfqq = cfqd->active_queue; |
e795421e4
|
2766 |
struct cfq_rb_root *st = cfqq->service_tree; |
c58698073
|
2767 |
struct cfq_io_cq *cic; |
9a7f38c42
|
2768 2769 |
u64 sl, group_idle = 0; u64 now = ktime_get_ns(); |
7b14e3b52
|
2770 |
|
a68bbddba
|
2771 |
/* |
f7d7b7a7a
|
2772 2773 2774 |
* SSD device without seek penalty, disable idling. But only do so * for devices that support queuing, otherwise we still have a problem * with sync vs async workloads. |
a68bbddba
|
2775 |
*/ |
f7d7b7a7a
|
2776 |
if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) |
a68bbddba
|
2777 |
return; |
dd67d0515
|
2778 |
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); |
6d048f531
|
2779 |
WARN_ON(cfq_cfqq_slice_new(cfqq)); |
22e2c507c
|
2780 2781 2782 2783 |
/* * idle is disabled, either manually or by past process history */ |
80bdf0c78
|
2784 2785 2786 2787 2788 2789 2790 |
if (!cfq_should_idle(cfqd, cfqq)) { /* no queue idling. Check for group idling */ if (cfqd->cfq_group_idle) group_idle = cfqd->cfq_group_idle; else return; } |
6d048f531
|
2791 |
|
22e2c507c
|
2792 |
/* |
8e550632c
|
2793 |
* still active requests from this queue, don't idle |
7b679138b
|
2794 |
*/ |
8e550632c
|
2795 |
if (cfqq->dispatched) |
7b679138b
|
2796 2797 2798 |
return; /* |
22e2c507c
|
2799 2800 |
* task has exited, don't wait */ |
206dc69b3
|
2801 |
cic = cfqd->active_cic; |
f6e8d01be
|
2802 |
if (!cic || !atomic_read(&cic->icq.ioc->active_ref)) |
6d048f531
|
2803 |
return; |
355b659c8
|
2804 2805 2806 2807 2808 |
/* * If our average think time is larger than the remaining time * slice, then don't idle. This avoids overrunning the allotted * time slice. */ |
383cd7213
|
2809 |
if (sample_valid(cic->ttime.ttime_samples) && |
9a7f38c42
|
2810 2811 |
(cfqq->slice_end - now < cic->ttime.ttime_mean)) { cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu", |
383cd7213
|
2812 |
cic->ttime.ttime_mean); |
355b659c8
|
2813 |
return; |
b1ffe737f
|
2814 |
} |
355b659c8
|
2815 |
|
e795421e4
|
2816 2817 2818 2819 2820 2821 2822 |
/* * There are other queues in the group, or this is the only group and * its thinktime is too big; either way, don't do group idle. */ if (group_idle && (cfqq->cfqg->nr_cfqq > 1 || cfq_io_thinktime_big(cfqd, &st->ttime, true))) |
80bdf0c78
|
2823 |
return; |
3b18152c3
|
2824 |
cfq_mark_cfqq_wait_request(cfqq); |
22e2c507c
|
2825 |
|
80bdf0c78
|
2826 2827 2828 2829 |
if (group_idle) sl = cfqd->cfq_group_idle; else sl = cfqd->cfq_slice_idle; |
206dc69b3
|
2830 |
|
911483258
|
2831 2832 |
hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl), HRTIMER_MODE_REL); |
155fead9b
|
2833 |
cfqg_stats_set_start_idle_time(cfqq->cfqg); |
9a7f38c42
|
2834 |
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl, |
80bdf0c78
|
2835 |
group_idle ? 1 : 0); |
1da177e4c
|
2836 |
} |
498d3aa2b
|
2837 2838 2839 |
/* * Move request from internal lists to the request queue dispatch list. */ |
165125e1e
|
2840 |
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) |
1da177e4c
|
2841 |
{ |
3ed9a2965
|
2842 |
struct cfq_data *cfqd = q->elevator->elevator_data; |
5e7053747
|
2843 |
struct cfq_queue *cfqq = RQ_CFQQ(rq); |
22e2c507c
|
2844 |
|
7b679138b
|
2845 |
cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); |
06d218864
|
2846 |
cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); |
5380a101d
|
2847 |
cfq_remove_request(rq); |
6d048f531
|
2848 |
cfqq->dispatched++; |
80bdf0c78
|
2849 |
(RQ_CFQG(rq))->dispatched++; |
5380a101d
|
2850 |
elv_dispatch_sort(q, rq); |
3ed9a2965
|
2851 |
|
53c583d22
|
2852 |
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; |
c4e7893eb
|
2853 |
cfqq->nr_sectors += blk_rq_sectors(rq); |
1da177e4c
|
2854 2855 2856 2857 2858 |
} /* * return expired entry, or NULL to just start from scratch in rbtree */ |
febffd618
|
2859 |
static struct request *cfq_check_fifo(struct cfq_queue *cfqq) |
1da177e4c
|
2860 |
{ |
30996f40b
|
2861 |
struct request *rq = NULL; |
1da177e4c
|
2862 |
|
3b18152c3
|
2863 |
if (cfq_cfqq_fifo_expire(cfqq)) |
1da177e4c
|
2864 |
return NULL; |
cb8874119
|
2865 2866 |
cfq_mark_cfqq_fifo_expire(cfqq); |
89850f7ee
|
2867 2868 |
if (list_empty(&cfqq->fifo)) return NULL; |
1da177e4c
|
2869 |
|
89850f7ee
|
2870 |
rq = rq_entry_fifo(cfqq->fifo.next); |
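/* fifo.next is the oldest request; it counts as expired only once its fifo_time deadline has passed */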
9a7f38c42
|
2871 |
if (ktime_get_ns() < rq->fifo_time) |
7b679138b
|
2872 |
rq = NULL; |
1da177e4c
|
2873 |
|
6d048f531
|
2874 |
return rq; |
1da177e4c
|
2875 |
} |
22e2c507c
|
2876 2877 2878 2879 |
static inline int cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) { const int base_rq = cfqd->cfq_slice_async_rq; |
1da177e4c
|
2880 |
|
22e2c507c
|
2881 |
WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); |
1da177e4c
|
2882 |
|
b9f8ce059
|
2883 |
return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio); |
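/* e.g. with the default cfq_slice_async_rq of 2: ioprio 0 may queue 2*2*8 = 32 requests, ioprio 7 only 2*2*1 = 4 */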
1da177e4c
|
2884 |
} |
22e2c507c
|
2885 |
/* |
df5fe3e8e
|
2886 2887 2888 2889 2890 2891 2892 |
* Must be called with the queue_lock held. */ static int cfqq_process_refs(struct cfq_queue *cfqq) { int process_refs, io_refs; io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; |
30d7b9448
|
2893 |
process_refs = cfqq->ref - io_refs; |
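/* whatever is not held by allocated requests is held by processes */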
df5fe3e8e
|
2894 2895 2896 2897 2898 2899 |
BUG_ON(process_refs < 0); return process_refs; } static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) { |
e6c5bc737
|
2900 |
int process_refs, new_process_refs; |
df5fe3e8e
|
2901 |
struct cfq_queue *__cfqq; |
c10b61f09
|
2902 2903 2904 2905 2906 2907 2908 2909 |
/* * If there are no process references on the new_cfqq, then it is * unsafe to follow the ->new_cfqq chain as other cfqq's in the * chain may have dropped their last reference (not just their * last process reference). */ if (!cfqq_process_refs(new_cfqq)) return; |
df5fe3e8e
|
2910 2911 2912 2913 2914 2915 2916 2917 |
/* Avoid a circular list and skip interim queue merges */ while ((__cfqq = new_cfqq->new_cfqq)) { if (__cfqq == cfqq) return; new_cfqq = __cfqq; } process_refs = cfqq_process_refs(cfqq); |
c10b61f09
|
2918 |
new_process_refs = cfqq_process_refs(new_cfqq); |
df5fe3e8e
|
2919 2920 2921 2922 |
/* * If the process for the cfqq has gone away, there is no * sense in merging the queues. */ |
c10b61f09
|
2923 |
if (process_refs == 0 || new_process_refs == 0) |
df5fe3e8e
|
2924 |
return; |
e6c5bc737
|
2925 2926 2927 |
/* * Merge in the direction of the lesser amount of work. */ |
e6c5bc737
|
2928 2929 |
if (new_process_refs >= process_refs) { cfqq->new_cfqq = new_cfqq; |
30d7b9448
|
2930 |
new_cfqq->ref += process_refs; |
e6c5bc737
|
2931 2932 |
} else { new_cfqq->new_cfqq = cfqq; |
30d7b9448
|
2933 |
cfqq->ref += new_process_refs; |
e6c5bc737
|
2934 |
} |
df5fe3e8e
|
2935 |
} |
6d816ec7c
|
2936 |
static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
			struct cfq_group *cfqg, enum wl_class_t wl_class)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	u64 lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
		if (queue &&
		    (!key_valid || queue->rb_key < lowest_key)) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}
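
/*
 * Note on the scan above: every workload type in the class (up to and
 * including SYNC_WORKLOAD) is compared by the rb_key of its first
 * queued cfqq, so the type whose head queue was scheduled earliest
 * wins; SYNC_NOIDLE is only the fallback when no tree has a queue.
 */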

static void
choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	u64 slice;
	unsigned count;
	struct cfq_rb_root *st;
	u64 group_slice;
	enum wl_class_t original_class = cfqd->serving_wl_class;
	u64 now = ktime_get_ns();

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_wl_class = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_wl_class = BE_WORKLOAD;
	else {
		cfqd->serving_wl_class = IDLE_WORKLOAD;
		cfqd->workload_expires = now + jiffies_to_nsecs(1);
		return;
	}

	if (original_class != cfqd->serving_wl_class)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !(now > cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
					cfqd->serving_wl_class);
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);
	slice = div_u64(group_slice * count,
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
					cfqg)));

	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
		u64 tmp;

		/*
		 * Async queues are currently system wide. Just taking
		 * proportion of queues within the same group will lead to a
		 * higher async ratio system wide, as generally the root group
		 * is going to have higher weight. A more accurate thing would
		 * be to calculate the system wide async/sync ratio.
		 */
		tmp = cfqd->cfq_target_latency *
			cfqg_busy_async_queues(cfqd, cfqg);
		tmp = div_u64(tmp, cfqd->busy_queues);
		slice = min_t(u64, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(u64, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%llu", slice);
	cfqd->workload_expires = now + slice;
}
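
/*
 * Worked example of the slice formula above (assuming the default
 * 300 ms cfq_target_latency): if the group's slice works out to 300 ms
 * and the chosen workload type holds 2 of the 6 busy queues in the
 * serving class, that workload gets roughly 300 * 2 / 6 = 100 ms
 * before another type is considered (the divisor is the larger of the
 * running average and the instantaneous busy count).
 */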

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
	u64 now = ktime_get_ns();

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_wl_slice) {
		cfqd->workload_expires = now + cfqg->saved_wl_slice;
		cfqd->serving_wl_type = cfqg->saved_wl_type;
		cfqd->serving_wl_class = cfqg->saved_wl_class;
	} else
		cfqd->workload_expires = now - 1;

	choose_wl_class_and_type(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	u64 now = ktime_get_ns();

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used the slice, that means we
		 * have been idling all along on this queue and it should be
		 * ok to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run. The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree. If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (hrtimer_active(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	     (cfqq->slice_end - now > now - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	u64 now = ktime_get_ns();

	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
		return true;

	return false;
}
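
/*
 * Estimate sketch (assuming the default 8 ms cfq_slice_idle): with 3
 * requests already dispatched, the slice counts as "used soon" once
 * less than 3 * 8 = 24 ms of it remains, i.e. one idle window's worth
 * of time per in-flight request.
 */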

static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	if (cfq_cfqq_must_dispatch(cfqq))
		return true;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue, we can ignore the async
		 * queue here and give the sync queue no dispatch limit. The
		 * reason is that a sync queue can preempt an async queue, so
		 * limiting the sync queue doesn't make sense. This is useful
		 * for the aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
				!promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of the slice,
			 * subject to the upper limit of cfq_quantum.
			 */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

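	/*
	 * Ramp sketch (assuming the default 100 ms sync slice): an async
	 * queue whose last delayed sync completion was 250 ms ago gets
	 * depth 250 / 100 = 2; the longer sync IO has been absent, the
	 * deeper async IO may be driven, still capped by max_dispatch.
	 */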
	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}

/*
 * Dispatch a request from cfqq, moving it to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	rq = cfq_check_fifo(cfqq);
	if (rq)
		cfq_mark_cfqq_must_dispatch(cfqq);

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	if (!rq)
		rq = cfqq->next_rq;
	else
		cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_cq *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->icq.ioc->refcount);
		cfqd->active_cic = cic;
	}

	return true;
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. an
	 * idle queue always expires after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = ktime_get_ns() + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Each cfq queue took a reference on the parent group. Drop it now.
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_group *cfqg;

	BUG_ON(cfqq->ref <= 0);

	cfqq->ref--;
	if (cfqq->ref)
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	cfqg = cfqq->cfqg;

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	kmem_cache_free(cfq_pool, cfqq);
	cfqg_put(cfqg);
}

static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}

static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

	cic->ttime.last_end_request = ktime_get_ns();
}

static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cic_to_cfqq(cic, false)) {
		cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
		cic_set_cfqq(cic, NULL, false);
	}

	if (cic_to_cfqq(cic, true)) {
		cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
		cic_set_cfqq(cic, NULL, true);
	}
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	int ioprio = cic->icq.ioc->ioprio;
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;

	/*
	 * Check whether ioprio has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
		return;

	cfqq = cic_to_cfqq(cic, false);
	if (cfqq) {
		cfq_put_queue(cfqq);
		cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
		cic_set_cfqq(cic, cfqq, false);
	}

	cfqq = cic_to_cfqq(cic, true);
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	cic->ioprio = ioprio;
}

static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = bio_blkcg(bio)->css.serial_nr;
	rcu_read_unlock();

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * Drop reference to queues. New queues will be assigned in new
	 * group upon arrival of fresh requests.
	 */
	cfqq = cic_to_cfqq(cic, false);
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, false);
		cfq_put_queue(cfqq);
	}

	cfqq = cic_to_cfqq(cic, true);
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, true);
		cfq_put_queue(cfqq);
	}

	cic->blkcg_serial_nr = serial_nr;
}
#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqg->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_NONE:
		ioprio = IOPRIO_NORM;
		/* fall through */
	case IOPRIO_CLASS_BE:
		return &cfqg->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqg->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
	      struct bio *bio)
{
	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq;
	struct cfq_group *cfqg;

	rcu_read_lock();
	cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
	if (!cfqg) {
		cfqq = &cfqd->oom_cfqq;
		goto out;
	}

	if (!is_sync) {
		if (!ioprio_valid(cic->ioprio)) {
			struct task_struct *tsk = current;
			ioprio = task_nice_ioprio(tsk);
			ioprio_class = task_nice_ioclass(tsk);
		}
		async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
		cfqq = *async_cfqq;
		if (cfqq)
			goto out;
	}

	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
				     cfqd->queue->node);
	if (!cfqq) {
		cfqq = &cfqd->oom_cfqq;
		goto out;
	}

	cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
	cfq_init_prio_data(cfqq, cic);
	cfq_link_cfqq_cfqg(cfqq, cfqg);
	cfq_log_cfqq(cfqd, cfqq, "alloced");

	if (async_cfqq) {
		/* a new async queue is created, pin and remember */
		cfqq->ref++;
		*async_cfqq = cfqq;
	}
out:
	cfqq->ref++;
	rcu_read_unlock();
	return cfqq;
}

static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
{
	u64 elapsed = ktime_get_ns() - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
	ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
				     ttime->ttime_samples);
}
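
/*
 * The update above is an exponentially weighted moving average: each
 * step keeps 7/8 of the old total and folds in 1/8 of the new sample,
 * with a fixed-point scale of 256 and +128 to round the final
 * division. Large outliers are clipped to 2 * slice_idle beforehand.
 */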

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
			cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);

	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
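
/*
 * seek_history is a 32-bit sliding window, one bit per recent request:
 * the bit is set when the request looked "seeky" (small on
 * non-rotational devices, far from the previous position otherwise).
 * The CFQQ_SEEKY() test then classes the queue as seeky once enough of
 * those last 32 bits are set.
 */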
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return
 * false if not (or if we aren't sure); returning true causes a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
		return true;

	/*
	 * Treat ancestors of current cgroup the same way as current cgroup.
	 * For anybody else we disallow preemption to guarantee service
	 * fairness among cgroups.
	 */
	if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_wl_slice = 0;

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfqg_stats_update_idle_time(cfqq->cfqg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has exceeded its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher
		 * priority, or if this new queue is RT and the current one
		 * is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq));

	rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
				 rq->cmd_flags);
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If active queue hasn't enough requests and can idle, cfq might not
	 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
	 * case
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}
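
/*
 * In other words: hw_tag_est_depth tracks the peak driver depth,
 * light-load samples are skipped so an idle device is not mistaken for
 * a non-queueing one, and after 50 qualifying samples hw_tag latches
 * to 1 if the device was ever observed holding CFQ_HW_QUEUE_MIN or
 * more requests at once.
 */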

static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;
	u64 now = ktime_get_ns();

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - now < cic->ttime.ttime_mean))
		return true;

	/*
	 * If think time is less than a jiffy, then ttime_mean=0 and the
	 * above will not be true. It might happen that the slice has not
	 * expired yet but will expire soon (4-5 ns) during select_queue().
	 * To cover the case where think time is less than a jiffy, mark the
	 * queue wait busy if only 1 jiffy is left in the slice.
	 */
	if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
		return true;

	return false;
}

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	u64 now = ktime_get_ns();

	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), req_op(rq),
				     rq->cmd_flags);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *st;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			st = cfqq->service_tree;
		else
			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
					cfqq_type(cfqq));

		st->ttime.last_end_request = now;
		/*
		 * We have to do this check in jiffies since start_time is in
		 * jiffies and it is not trivial to convert to ns. If
		 * cfq_fifo_expire[1] ever comes close to 1 jiffy, this test
		 * will become problematic but so far we are fine (the default
		 * is 128 ms).
		 */
		if (!time_after(rq->start_time +
				nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]),
				jiffies))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue.
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			u64 extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = now + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
{
	/*
	 * If REQ_PRIO is set, boost class and prio level, if it's below
	 * BE/NORM. If prio is not set, restore the potentially boosted
	 * class/prio level.
	 */
	if (!(op_flags & REQ_PRIO)) {
		cfqq->ioprio_class = cfqq->org_ioprio_class;
		cfqq->ioprio = cfqq->org_ioprio;
	} else {
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	}
}
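
/*
 * Example of the boost above: an IOPRIO_CLASS_IDLE queue issuing a
 * REQ_PRIO request is temporarily lifted to the best-effort class at
 * (or above) IOPRIO_NORM; the org_ioprio/org_ioprio_class values saved
 * in cfq_init_prio_data() restore the original setting on the next
 * request without REQ_PRIO.
 */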

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);
		cfqq_boost_on_prio(cfqq, op_flags);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfqg_put(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		if (cfqq)
			cfq_put_queue(cfqq);
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
{
	struct cfq_data *cfqd = container_of(timer, struct cfq_data,
					     idle_slice_timer);
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
	return HRTIMER_NORESTART;
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	hrtimer_cancel(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

static int cfq_init_queue(struct request_queue *q, struct elevator_type *e) |
1da177e4c
|
4327 4328 |
{ struct cfq_data *cfqd; |
3c798398e
|
4329 |
struct blkcg_gq *blkg __maybe_unused; |
a2b1693ba
|
4330 |
int i, ret; |
d50235b7b
|
4331 4332 4333 4334 4335 |
struct elevator_queue *eq; eq = elevator_alloc(q, e); if (!eq) return -ENOMEM; |
1da177e4c
|
4336 |
|
c1b511eb2
|
4337 |
cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); |
d50235b7b
|
4338 4339 |
if (!cfqd) { kobject_put(&eq->kobj); |
b2fab5acd
|
4340 |
return -ENOMEM; |
d50235b7b
|
4341 4342 |
} eq->elevator_data = cfqd; |
80b15c738
|
4343 |
|
f51b802c1
|
4344 |
cfqd->queue = q; |
d50235b7b
|
4345 4346 4347 |
spin_lock_irq(q->queue_lock); q->elevator = eq; spin_unlock_irq(q->queue_lock); |
f51b802c1
|
4348 |
|
1fa8f6d68
|
4349 4350 |
/* Init root service tree */ cfqd->grp_service_tree = CFQ_RB_ROOT; |
f51b802c1
|
4351 |
/* Init root group and prefer root group over other groups by default */ |
25fb5169d
|
4352 |
#ifdef CONFIG_CFQ_GROUP_IOSCHED |
3c798398e
|
4353 |
ret = blkcg_activate_policy(q, &blkcg_policy_cfq); |
a2b1693ba
|
4354 4355 |
if (ret) goto out_free; |
f51b802c1
|
4356 |
|
a2b1693ba
|
4357 |
cfqd->root_group = blkg_to_cfqg(q->root_blkg); |
f51b802c1
|
4358 |
#else |
a2b1693ba
|
4359 |
ret = -ENOMEM; |
f51b802c1
|
4360 4361 |
cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group), GFP_KERNEL, cfqd->queue->node); |
a2b1693ba
|
4362 4363 |
if (!cfqd->root_group) goto out_free; |
5624a4e44
|
4364 |
|
a2b1693ba
|
4365 |
cfq_init_cfqg_base(cfqd->root_group); |
3ecca6293
|
4366 4367 |
cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL; cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL; |
69d7fde59
|
4368 |
#endif |
5624a4e44
|
4369 |
|
26a2ac009
|
4370 4371 4372 4373 4374 4375 4376 |
/* * Not strictly needed (since RB_ROOT just clears the node and we * zeroed cfqd on alloc), but better be safe in case someone decides * to add magic to the rb code */ for (i = 0; i < CFQ_PRIO_LISTS; i++) cfqd->prio_trees[i] = RB_ROOT; |
6118b70b3
|
4377 |
/* |
d4aad7ff0
|
4378 |
* Our fallback cfqq if cfq_get_queue() runs into OOM issues. |
6118b70b3
|
4379 |
* Grab a permanent reference to it, so that the normal code flow |
f51b802c1
|
4380 4381 4382 |
* will not attempt to free it. oom_cfqq is linked to root_group * but shouldn't hold a reference as it'll never be unlinked. Lose * the reference from linking right away. |
6118b70b3
|
4383 4384 |
*/ cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); |
30d7b9448
|
4385 |
cfqd->oom_cfqq.ref++; |
1adaf3dde
|
4386 4387 |
spin_lock_irq(q->queue_lock); |
f51b802c1
|
4388 |
cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group); |
eb7d8c07f
|
4389 |
cfqg_put(cfqd->root_group); |
1adaf3dde
|
4390 |
spin_unlock_irq(q->queue_lock); |
1da177e4c
|
4391 |
|
911483258
|
4392 4393 |
hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
22e2c507c
|
4394 |
cfqd->idle_slice_timer.function = cfq_idle_slice_timer; |
22e2c507c
|
4395 |
|
23e018a1b
|
4396 |
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); |
22e2c507c
|
4397 |
|
1da177e4c
|
4398 |
cfqd->cfq_quantum = cfq_quantum; |
22e2c507c
|
4399 4400 |
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; |
1da177e4c
|
4401 4402 |
cfqd->cfq_back_max = cfq_back_max; cfqd->cfq_back_penalty = cfq_back_penalty; |
22e2c507c
|
4403 4404 |
cfqd->cfq_slice[0] = cfq_slice_async; cfqd->cfq_slice[1] = cfq_slice_sync; |
5bf14c072
|
4405 |
cfqd->cfq_target_latency = cfq_target_latency; |
22e2c507c
|
4406 |
cfqd->cfq_slice_async_rq = cfq_slice_async_rq; |
0bb979472
|
4407 |
cfqd->cfq_slice_idle = cfq_slice_idle; |
80bdf0c78
|
4408 |
cfqd->cfq_group_idle = cfq_group_idle; |
963b72fc6
|
4409 |
cfqd->cfq_latency = 1; |
e459dd08f
|
4410 |
cfqd->hw_tag = -1; |
edc71131c
|
4411 4412 4413 4414 |
/* * we optimistically start assuming sync ops weren't delayed in last * second, in order to have larger depth for async operations. */ |
9a7f38c42
|
4415 |
cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC; |
b2fab5acd
|
4416 |
return 0; |
a2b1693ba
|
4417 4418 4419 |
out_free: kfree(cfqd); |
d50235b7b
|
4420 |
kobject_put(&eq->kobj); |
a2b1693ba
|
4421 |
return ret; |
1da177e4c
|
4422 |
} |
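/*
 * Note added for illustration: 2 * CFQ_WEIGHT_LEGACY_DFL above is
 * 2 * 500 = 1000, i.e. CFQ_WEIGHT_LEGACY_MAX, so the root group is given
 * the heaviest weight the legacy interface allows, which is what the
 * "prefer root group over other groups" comment refers to.
 */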
static void cfq_registered_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	struct cfq_data *cfqd = e->elevator_data;

	/*
	 * Default to IOPS mode with no idling for SSDs
	 */
	if (blk_queue_nonrot(q))
		cfqd->cfq_slice_idle = 0;
}
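/*
 * Illustration (not part of the original file): the effect of the hook
 * above can be inspected and overridden from userspace; "sda" is a
 * placeholder device:
 *
 *	# cat /sys/block/sda/queue/rotational
 *	0
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	0
 *	# echo 8 > /sys/block/sda/queue/iosched/slice_idle
 *
 * The last write re-enables slice idling (8 ms is the rotational default).
 */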
/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%u\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
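/*
 * Note added for illustration: cfq_var_store() does no error checking;
 * simple_strtoul() consumes leading decimal digits and anything after
 * them is silently ignored, so e.g. writing "8x" to a tunable stores 8
 * and still reports the full write as successful.
 */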
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	if (__CONV)							\
		__data = div_u64(__data, NSEC_PER_MSEC);		\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION

#define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	__data = div_u64(__data, NSEC_PER_USEC);			\
	return cfq_var_show(__data, (page));				\
}
USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
#undef USEC_SHOW_FUNCTION
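/*
 * Illustration (not part of the original file): expanding
 * SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1) by hand
 * gives roughly:
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		u64 __data = cfqd->cfq_slice_idle;
 *		if (1)
 *			__data = div_u64(__data, NSEC_PER_MSEC);
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * i.e. a nanosecond tunable is reported in milliseconds, while the
 * *_us_show variants above divide by NSEC_PER_USEC and report
 * microseconds instead.
 */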
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1,
		UINT_MAX, 1);
#undef STORE_FUNCTION

#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
	return ret;							\
}
USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1,
		UINT_MAX);
#undef USEC_STORE_FUNCTION
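/*
 * Illustration (not part of the original file): a write such as
 * "echo 16 > .../iosched/slice_idle" travels through
 * cfq_slice_idle_store() as follows:
 *
 *	cfq_var_store() parses "16" into __data;
 *	__data is clamped to the [0, UINT_MAX] range;
 *	__CONV is 1, so cfqd->cfq_slice_idle = (u64)16 * NSEC_PER_MSEC.
 *
 * The *_us variants skip the __CONV test and always scale by
 * NSEC_PER_USEC, allowing the same tunables to be set with microsecond
 * granularity.
 */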
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_sync_us),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_us),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(slice_idle_us),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(group_idle_us),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	CFQ_ATTR(target_latency_us),
	__ATTR_NULL
};
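/*
 * Illustration (not part of the original file): CFQ_ATTR(quantum) expands
 * to
 *
 *	__ATTR(quantum, S_IRUGO|S_IWUSR, cfq_quantum_show, cfq_quantum_store)
 *
 * so every entry in cfq_attrs[] becomes a world-readable, root-writable
 * file named after the tunable under /sys/block/<dev>/queue/iosched/
 * while this elevator is active on <dev>.
 */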
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_bio_merge_fn =	cfq_allow_bio_merge,
		.elevator_allow_rq_merge_fn =	cfq_allow_rq_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.elevator_registered_fn =	cfq_registered_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};
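/*
 * Illustration (not part of the original file): once elv_register() has
 * seen this elevator_type, the scheduler can be selected per device at
 * runtime; "sda" is a placeholder:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *
 * Switching invokes the outgoing scheduler's elevator_exit_fn and the
 * incoming one's elevator_init_fn on that queue.
 */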
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.dfl_cftypes		= cfq_blkcg_files,
	.legacy_cftypes		= cfq_blkcg_legacy_files,

	.cpd_alloc_fn		= cfq_cpd_alloc,
	.cpd_init_fn		= cfq_cpd_init,
	.cpd_free_fn		= cfq_cpd_free,
	.cpd_bind_fn		= cfq_cpd_bind,

	.pd_alloc_fn		= cfq_pd_alloc,
	.pd_init_fn		= cfq_pd_init,
	.pd_offline_fn		= cfq_pd_offline,
	.pd_free_fn		= cfq_pd_free,
	.pd_reset_stats_fn	= cfq_pd_reset_stats,
};
#endif
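/*
 * Note added for illustration: in the policy above, the cpd_* callbacks
 * manage per-cgroup policy data (one instance per blkcg) while the pd_*
 * callbacks manage per-(cgroup, request_queue) data, which is where each
 * cfq_group lives; blkcg_activate_policy() in cfq_init_queue() is what
 * instantiates the latter for a given queue.
 */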
static int __init cfq_init(void)
{
	int ret;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
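/*
 * Note added for illustration: cfq_init() registers the blkcg policy
 * before the slab cache and the elevator, and the error path unwinds in
 * reverse; cfq_exit() mirrors that order.  Since .elevator_owner pins
 * this module while any queue uses "cfq", cfq_exit() can only run once
 * no queue references the scheduler, making it safe to destroy cfq_pool
 * last.
 */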