block/cfq-iosched.c 99.7 KB
  /*
   *  CFQ, or complete fairness queueing, disk scheduler.
   *
   *  Based on ideas from a previously unfinished io
   *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   *
   *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   */
  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/blkdev.h>
  #include <linux/elevator.h>
  #include <linux/jiffies.h>
  #include <linux/rbtree.h>
  #include <linux/ioprio.h>
  #include <linux/blktrace_api.h>
  #include "blk.h"
  #include "cfq.h"
  
  /*
   * tunables
   */
  /* max queue in one round of service */
  static const int cfq_quantum = 8;
  static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  /* maximum backwards seek, in KiB */
  static const int cfq_back_max = 16 * 1024;
  /* penalty of a backwards seek */
  static const int cfq_back_penalty = 2;
  static const int cfq_slice_sync = HZ / 10;
  static int cfq_slice_async = HZ / 25;
  static const int cfq_slice_async_rq = 2;
  static int cfq_slice_idle = HZ / 125;
  static int cfq_group_idle = HZ / 125;
  static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  static const int cfq_hist_divisor = 4;

  /*
   * offset from end of service tree
   */
  #define CFQ_IDLE_DELAY		(HZ / 5)
  
  /*
   * below this threshold, we consider thinktime immediate
   */
  #define CFQ_MIN_TT		(2)
  #define CFQ_SLICE_SCALE		(5)
  #define CFQ_HW_QUEUE_MIN	(5)
  #define CFQ_SERVICE_SHIFT       12
  
  #define CFQQ_SEEK_THR		(sector_t)(8 * 100)
  #define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
  #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
  #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
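  /*
   * Note: seek_history is a 32-bit sliding window, one bit per recent request,
   * set when that request was beyond the seek threshold; a queue is classed
   * as seeky once more than 1/8 of its last 32 requests were.
   */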
  
  #define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
  #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
  #define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
  
  static struct kmem_cache *cfq_pool;
  
  #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
  #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  #define sample_valid(samples)	((samples) > 80)
  #define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
  
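  /*
   * Think-time bookkeeping: tracks when the last request completed and a
   * running mean of how long the submitter then takes to issue the next one.
   * The mean is later weighed against the idle window (see
   * cfq_io_thinktime_big()) to decide whether idling is worthwhile.
   */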
  struct cfq_ttime {
  	unsigned long last_end_request;
  
  	unsigned long ttime_total;
  	unsigned long ttime_samples;
  	unsigned long ttime_mean;
  };
  /*
   * Most of our rbtree usage is for sorting with min extraction, so
   * if we cache the leftmost node we don't have to walk down the tree
   * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
   * move this into the elevator for the rq sorting as well.
   */
  struct cfq_rb_root {
  	struct rb_root rb;
  	struct rb_node *left;
  	unsigned count;
  	unsigned total_weight;
  	u64 min_vdisktime;
  	struct cfq_ttime ttime;
  };
  #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
  			.ttime = {.last_end_request = jiffies,},}
  
  /*
   * Per process-grouping structure
   */
  struct cfq_queue {
  	/* reference count */
  	int ref;
  	/* various state flags, see below */
  	unsigned int flags;
  	/* parent cfq_data */
  	struct cfq_data *cfqd;
  	/* service_tree member */
  	struct rb_node rb_node;
  	/* service_tree key */
  	unsigned long rb_key;
  	/* prio tree member */
  	struct rb_node p_node;
  	/* prio tree root we belong to, if any */
  	struct rb_root *p_root;
  	/* sorted list of pending requests */
  	struct rb_root sort_list;
  	/* if fifo isn't expired, next request to serve */
  	struct request *next_rq;
  	/* requests queued in sort_list */
  	int queued[2];
  	/* currently allocated requests */
  	int allocated[2];
  	/* fifo list of requests in sort_list */
  	struct list_head fifo;
  	/* time when queue got scheduled in to dispatch first request. */
  	unsigned long dispatch_start;
  	unsigned int allocated_slice;
  	unsigned int slice_dispatch;
  	/* time when first request from queue completed and slice started. */
  	unsigned long slice_start;
  	unsigned long slice_end;
  	long slice_resid;
  
  	/* pending priority requests */
  	int prio_pending;
  	/* number of requests that are on the dispatch list or inside driver */
  	int dispatched;
  
  	/* io prio of this group */
  	unsigned short ioprio, org_ioprio;
  	unsigned short ioprio_class;
  
  	pid_t pid;
  	u32 seek_history;
  	sector_t last_request_pos;
  	struct cfq_rb_root *service_tree;
  	struct cfq_queue *new_cfqq;
  	struct cfq_group *cfqg;
  	/* Number of sectors dispatched from queue in single dispatch round */
  	unsigned long nr_sectors;
  };
  
  /*
   * First index in the service_trees.
   * IDLE is handled separately, it is not part of the service_trees[][]
   * array and has its own service tree.
   */
  enum wl_prio_t {
  	BE_WORKLOAD = 0,
  	RT_WORKLOAD = 1,
  	IDLE_WORKLOAD = 2,
  	CFQ_PRIO_NR,
  };
  
  /*
   * Second index in the service_trees.
   */
  enum wl_type_t {
  	ASYNC_WORKLOAD = 0,
  	SYNC_NOIDLE_WORKLOAD = 1,
  	SYNC_WORKLOAD = 2
  };
  /* This is per cgroup per device grouping structure */
  struct cfq_group {
  	/* group service_tree member */
  	struct rb_node rb_node;
  
  	/* group service_tree key */
  	u64 vdisktime;
  	unsigned int weight;
  	unsigned int new_weight;
  	bool needs_update;
  
  	/* number of cfqq currently on this group */
  	int nr_cfqq;
  	/*
  	 * Per group busy queues average. Useful for workload slice calc. We
  	 * create the array for each prio class but at run time it is used
  	 * only for the RT and BE classes; the slot for the IDLE class remains unused.
  	 * This is primarily done to avoid confusion and a gcc warning.
  	 */
  	unsigned int busy_queues_avg[CFQ_PRIO_NR];
  	/*
  	 * rr lists of queues with requests. We maintain service trees for
  	 * RT and BE classes. These trees are subdivided in subclasses
  	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
  	 * class there is no subclassification and all the cfq queues go on
  	 * a single tree service_tree_idle.
  	 * Counts are embedded in the cfq_rb_root
  	 */
  	struct cfq_rb_root service_trees[2][3];
  	struct cfq_rb_root service_tree_idle;
  
  	unsigned long saved_workload_slice;
  	enum wl_type_t saved_workload;
  	enum wl_prio_t saved_serving_prio;
  	struct blkio_group blkg;
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	struct hlist_node cfqd_node;
  	int ref;
  #endif
  	/* number of requests that are on the dispatch list or inside driver */
  	int dispatched;
  	struct cfq_ttime ttime;
  };
  
  struct cfq_io_cq {
  	struct io_cq		icq;		/* must be the first member */
  	struct cfq_queue	*cfqq[2];
  	struct cfq_ttime	ttime;
  };
  /*
   * Per block device queue structure
   */
  struct cfq_data {
  	struct request_queue *queue;
  	/* Root service tree for cfq_groups */
  	struct cfq_rb_root grp_service_tree;
  	struct cfq_group root_group;
  
  	/*
  	 * The priority currently being served
  	 */
  	enum wl_prio_t serving_prio;
  	enum wl_type_t serving_type;
  	unsigned long workload_expires;
  	struct cfq_group *serving_group;
  
  	/*
  	 * Each priority tree is sorted by next_request position.  These
  	 * trees are used when determining if two or more queues are
  	 * interleaving requests (see cfq_close_cooperator).
  	 */
  	struct rb_root prio_trees[CFQ_PRIO_LISTS];
  	unsigned int busy_queues;
  	unsigned int busy_sync_queues;

  	int rq_in_driver;
  	int rq_in_flight[2];
  
  	/*
  	 * queue-depth detection
  	 */
  	int rq_queued;
  	int hw_tag;
  	/*
  	 * hw_tag can be
  	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
  	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
  	 *  0 => no NCQ
  	 */
  	int hw_tag_est_depth;
  	unsigned int hw_tag_samples;

  	/*
  	 * idle window management
  	 */
  	struct timer_list idle_slice_timer;
  	struct work_struct unplug_work;
  
  	struct cfq_queue *active_queue;
  	struct cfq_io_cq *active_cic;
  
  	/*
  	 * async queue for each priority case
  	 */
  	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
  	struct cfq_queue *async_idle_cfqq;
  
  	sector_t last_position;

  	/*
  	 * tunables, see top of file
  	 */
  	unsigned int cfq_quantum;
  	unsigned int cfq_fifo_expire[2];
  	unsigned int cfq_back_penalty;
  	unsigned int cfq_back_max;
  	unsigned int cfq_slice[2];
  	unsigned int cfq_slice_async_rq;
  	unsigned int cfq_slice_idle;
  	unsigned int cfq_group_idle;
  	unsigned int cfq_latency;
  
  	/*
  	 * Fallback dummy cfqq for extreme OOM conditions
  	 */
  	struct cfq_queue oom_cfqq;
  
  	unsigned long last_delayed_sync;
  
  	/* List of cfq groups being managed on this device*/
  	struct hlist_head cfqg_list;
  
  	/* Number of groups which are on blkcg->blkg_list */
  	unsigned int nr_blkcg_linked_grps;
  };
  static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
  static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
  					    enum wl_prio_t prio,
  					    enum wl_type_t type)
  {
  	if (!cfqg)
  		return NULL;
  	if (prio == IDLE_WORKLOAD)
  		return &cfqg->service_tree_idle;
  
  	return &cfqg->service_trees[prio][type];
  }
  enum cfqq_state_flags {
  	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
  	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
  	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
  	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
  	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
  	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
  	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
  	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
  	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
  	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
  	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
  	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
  	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
  };
  
  #define CFQ_CFQQ_FNS(name)						\
  static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
  {									\
  	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
  }									\
  static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
  {									\
  	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
  }									\
  static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
  {									\
  	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
  }
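  /*
   * Each CFQ_CFQQ_FNS(x) invocation below expands to three helpers,
   * cfq_mark_cfqq_x(), cfq_clear_cfqq_x() and cfq_cfqq_x(), for setting,
   * clearing and testing the corresponding CFQ_CFQQ_FLAG_x bit,
   * e.g. cfq_mark_cfqq_on_rr(cfqq).
   */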
  
  CFQ_CFQQ_FNS(on_rr);
  CFQ_CFQQ_FNS(wait_request);
  CFQ_CFQQ_FNS(must_dispatch);
  CFQ_CFQQ_FNS(must_alloc_slice);
  CFQ_CFQQ_FNS(fifo_expire);
  CFQ_CFQQ_FNS(idle_window);
  CFQ_CFQQ_FNS(prio_changed);
  CFQ_CFQQ_FNS(slice_new);
  CFQ_CFQQ_FNS(sync);
  CFQ_CFQQ_FNS(coop);
  CFQ_CFQQ_FNS(split_coop);
  CFQ_CFQQ_FNS(deep);
  CFQ_CFQQ_FNS(wait_busy);
  #undef CFQ_CFQQ_FNS
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
  			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
  			blkg_path(&(cfqq)->cfqg->blkg), ##args)
  
  #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
  	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
  				blkg_path(&(cfqg)->blkg), ##args)       \
  
  #else
  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
  #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
  #endif
  #define cfq_log(cfqd, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
  /* Traverses through cfq group service trees */
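  /*
   * Iteration order: service_trees[BE][ASYNC..SYNC], then
   * service_trees[RT][ASYNC..SYNC], and finally the single
   * service_tree_idle, i.e. seven trees per group in total.
   */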
  #define for_each_cfqg_st(cfqg, i, j, st) \
  	for (i = 0; i <= IDLE_WORKLOAD; i++) \
  		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
  			: &cfqg->service_tree_idle; \
  			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
  			(i == IDLE_WORKLOAD && j == 0); \
  			j++, st = i < IDLE_WORKLOAD ? \
  			&cfqg->service_trees[i][j]: NULL) \
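  /*
   * Returns true when the mean think time seen on @ttime exceeds the idle
   * window we would be prepared to wait (group or queue idle), i.e. idling
   * for the next request is unlikely to pay off.
   */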
  static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
  	struct cfq_ttime *ttime, bool group_idle)
  {
  	unsigned long slice;
  	if (!sample_valid(ttime->ttime_samples))
  		return false;
  	if (group_idle)
  		slice = cfqd->cfq_group_idle;
  	else
  		slice = cfqd->cfq_slice_idle;
  	return ttime->ttime_mean > slice;
  }

  static inline bool iops_mode(struct cfq_data *cfqd)
  {
  	/*
  	 * If we are not idling on queues and it is a NCQ drive, parallel
  	 * execution of requests is on and measuring time is not possible
  	 * in most of the cases until and unless we drive shallower queue
  	 * depths and that becomes a performance bottleneck. In such cases
  	 * switch to start providing fairness in terms of number of IOs.
  	 */
  	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
  		return true;
  	else
  		return false;
  }
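  /* Map a queue's ioprio class onto the workload index used for its service tree. */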
  static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
  {
  	if (cfq_class_idle(cfqq))
  		return IDLE_WORKLOAD;
  	if (cfq_class_rt(cfqq))
  		return RT_WORKLOAD;
  	return BE_WORKLOAD;
  }
  
  static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
  {
  	if (!cfq_cfqq_sync(cfqq))
  		return ASYNC_WORKLOAD;
  	if (!cfq_cfqq_idle_window(cfqq))
  		return SYNC_NOIDLE_WORKLOAD;
  	return SYNC_WORKLOAD;
  }
  static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
  					struct cfq_data *cfqd,
  					struct cfq_group *cfqg)
  {
  	if (wl == IDLE_WORKLOAD)
  		return cfqg->service_tree_idle.count;
  
  	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
  		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
  		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
  }
  static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
  					struct cfq_group *cfqg)
  {
  	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
  		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
  }
  static void cfq_dispatch_insert(struct request_queue *, struct request *);
  static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
  				       struct io_context *, gfp_t);
  
  static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
  {
  	/* cic->icq is the first member, %NULL will convert to %NULL */
  	return container_of(icq, struct cfq_io_cq, icq);
  }
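  /* Look up the cfq_io_cq attached to @ioc for this queue, if any. */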
  static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
  					       struct io_context *ioc)
  {
  	if (ioc)
  		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
  	return NULL;
  }
  static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
  {
  	return cic->cfqq[is_sync];
  }
  static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
  				bool is_sync)
  {
  	cic->cfqq[is_sync] = cfqq;
  }
  static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
  {
  	return cic->icq.q->elevator->elevator_data;
  }
  /*
   * We regard a request as SYNC, if it's either a read or has the SYNC bit
   * set (in which case it could also be direct WRITE).
   */
  static inline bool cfq_bio_sync(struct bio *bio)
  {
  	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
  }

  /*
   * scheduler run of queue, if there are requests pending and no one in the
   * driver that will restart queueing
   */
  static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
  {
  	if (cfqd->busy_queues) {
  		cfq_log(cfqd, "schedule dispatch");
  		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
  	}
  }
  /*
   * Scale schedule slice based on io priority. Use the sync time slice only
   * if a queue is marked sync and has sync io queued. A sync queue with async
   * io only, should not get full sync slice length.
   */
  static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
  				 unsigned short prio)
  {
  	const int base_slice = cfqd->cfq_slice[sync];
  
  	WARN_ON(prio >= IOPRIO_BE_NR);
  
  	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
  }
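  /*
   * Rough illustration, assuming HZ=1000 and the default tunables: the sync
   * base slice is HZ/10 = 100ms, so an ioprio 4 (default) queue gets 100ms,
   * ioprio 0 gets 100 + (100/5) * 4 = 180ms and ioprio 7 gets
   * 100 - (100/5) * 3 = 40ms.
   */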
  
  static inline int
  cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
  }
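  /*
   * Group scheduling key helpers: used service time is converted into
   * "vdisktime" scaled by BLKIO_WEIGHT_DEFAULT/weight in CFQ_SERVICE_SHIFT
   * fixed point, so heavier groups accrue vdisktime more slowly and get
   * picked more often. min_vdisktime/max_vdisktime compare via a signed
   * delta so the result stays correct across u64 wrap-around.
   */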
  static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
  {
  	u64 d = delta << CFQ_SERVICE_SHIFT;
  
  	d = d * BLKIO_WEIGHT_DEFAULT;
  	do_div(d, cfqg->weight);
  	return d;
  }
  
  static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
  {
  	s64 delta = (s64)(vdisktime - min_vdisktime);
  	if (delta > 0)
  		min_vdisktime = vdisktime;
  
  	return min_vdisktime;
  }
  
  static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
  {
  	s64 delta = (s64)(vdisktime - min_vdisktime);
  	if (delta < 0)
  		min_vdisktime = vdisktime;
  
  	return min_vdisktime;
  }
  
  static void update_min_vdisktime(struct cfq_rb_root *st)
  {
  	struct cfq_group *cfqg;
  	if (st->left) {
  		cfqg = rb_entry_cfqg(st->left);
  		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
  						  cfqg->vdisktime);
  	}
  }
  /*
   * get averaged number of queues of RT/BE priority.
   * average is updated, with a formula that gives more weight to higher numbers,
   * to quickly follow sudden increases and decrease slowly
   */
  static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
  					struct cfq_group *cfqg, bool rt)
  {
  	unsigned min_q, max_q;
  	unsigned mult  = cfq_hist_divisor - 1;
  	unsigned round = cfq_hist_divisor / 2;
  	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

  	min_q = min(cfqg->busy_queues_avg[rt], busy);
  	max_q = max(cfqg->busy_queues_avg[rt], busy);
  	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
  		cfq_hist_divisor;
  	return cfqg->busy_queues_avg[rt];
  }
  
  static inline unsigned
  cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  
  	return cfq_target_latency * cfqg->weight / st->total_weight;
  }
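  /*
   * e.g. a group holding 2/5 of the total weight on the tree is allotted
   * 2/5 of cfq_target_latency, i.e. 120ms of the default 300ms window.
   */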
  static inline unsigned
  cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
  	if (cfqd->cfq_latency) {
  		/*
  		 * interested queues (we consider only the ones with the same
  		 * priority class in the cfq group)
  		 */
  		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
  						cfq_class_rt(cfqq));
  		unsigned sync_slice = cfqd->cfq_slice[1];
  		unsigned expect_latency = sync_slice * iq;
  		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
  
  		if (expect_latency > group_slice) {
  			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
  			/* scale low_slice according to IO priority
  			 * and sync vs async */
  			unsigned low_slice =
  				min(slice, base_low_slice * slice / sync_slice);
  			/* the adapted slice value is scaled to fit all iqs
  			 * into the target latency */
  			slice = max(slice * group_slice / expect_latency,
  				    low_slice);
  		}
  	}
  	return slice;
  }
  
  static inline void
  cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
  
  	cfqq->slice_start = jiffies;
  	cfqq->slice_end = jiffies + slice;
  	cfqq->allocated_slice = slice;
  	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
  }
  
  /*
   * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
   * isn't valid until the first request from the dispatch is activated
   * and the slice time set.
   */
  static inline bool cfq_slice_used(struct cfq_queue *cfqq)
  {
  	if (cfq_cfqq_slice_new(cfqq))
  		return false;
  	if (time_before(jiffies, cfqq->slice_end))
  		return false;
  
  	return true;
  }
  
  /*
   * Lifted from AS - choose which of rq1 and rq2 is best served now.
   * We choose the request that is closest to the head right now. Distance
   * behind the head is penalized and only allowed to a certain extent.
   */
  static struct request *
  cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
  {
  	sector_t s1, s2, d1 = 0, d2 = 0;
  	unsigned long back_max;
  #define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
  #define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
  	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

  	if (rq1 == NULL || rq1 == rq2)
  		return rq2;
  	if (rq2 == NULL)
  		return rq1;
  
  	if (rq_is_sync(rq1) != rq_is_sync(rq2))
  		return rq_is_sync(rq1) ? rq1 : rq2;
  	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
  		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
  
  	s1 = blk_rq_pos(rq1);
  	s2 = blk_rq_pos(rq2);
  
  	/*
  	 * by definition, 1KiB is 2 sectors
  	 */
  	back_max = cfqd->cfq_back_max * 2;
  
  	/*
  	 * Strict one way elevator _except_ in the case where we allow
  	 * short backward seeks which are biased as twice the cost of a
  	 * similar forward seek.
  	 */
  	if (s1 >= last)
  		d1 = s1 - last;
  	else if (s1 + back_max >= last)
  		d1 = (last - s1) * cfqd->cfq_back_penalty;
  	else
  		wrap |= CFQ_RQ1_WRAP;
  
  	if (s2 >= last)
  		d2 = s2 - last;
  	else if (s2 + back_max >= last)
  		d2 = (last - s2) * cfqd->cfq_back_penalty;
  	else
  		wrap |= CFQ_RQ2_WRAP;
  
  	/* Found required data */
  
  	/*
  	 * By doing switch() on the bit mask "wrap" we avoid having to
  	 * check two variables for all permutations: --> faster!
  	 */
  	switch (wrap) {
  	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  		if (d1 < d2)
  			return rq1;
  		else if (d2 < d1)
  			return rq2;
  		else {
  			if (s1 >= s2)
  				return rq1;
  			else
  				return rq2;
  		}
  
  	case CFQ_RQ2_WRAP:
  		return rq1;
  	case CFQ_RQ1_WRAP:
  		return rq2;
  	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
  	default:
  		/*
  		 * Since both rqs are wrapped,
  		 * start with the one that's further behind head
  		 * (--> only *one* back seek required),
  		 * since back seek takes more time than forward.
  		 */
  		if (s1 <= s2)
  			return rq1;
  		else
  			return rq2;
  	}
  }
  /*
   * The below is leftmost cache rbtree addon
   */
  static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
  {
  	/* Service tree is empty */
  	if (!root->count)
  		return NULL;
  	if (!root->left)
  		root->left = rb_first(&root->rb);
  	if (root->left)
  		return rb_entry(root->left, struct cfq_queue, rb_node);
  
  	return NULL;
  }
  static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
  {
  	if (!root->left)
  		root->left = rb_first(&root->rb);
  
  	if (root->left)
  		return rb_entry_cfqg(root->left);
  
  	return NULL;
  }
  static void rb_erase_init(struct rb_node *n, struct rb_root *root)
  {
  	rb_erase(n, root);
  	RB_CLEAR_NODE(n);
  }
  static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
  {
  	if (root->left == n)
  		root->left = NULL;
  	rb_erase_init(n, &root->rb);
  	--root->count;
  }
  /*
   * would be nice to take fifo expire time into account as well
   */
  static struct request *
  cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  		  struct request *last)
  {
  	struct rb_node *rbnext = rb_next(&last->rb_node);
  	struct rb_node *rbprev = rb_prev(&last->rb_node);
  	struct request *next = NULL, *prev = NULL;
  
  	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
  
  	if (rbprev)
  		prev = rb_entry_rq(rbprev);
  
  	if (rbnext)
  		next = rb_entry_rq(rbnext);
  	else {
  		rbnext = rb_first(&cfqq->sort_list);
  		if (rbnext && rbnext != &last->rb_node)
  			next = rb_entry_rq(rbnext);
  	}
  
  	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
  }
  static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  				      struct cfq_queue *cfqq)
  {
  	/*
  	 * just an approximation, should be ok.
  	 */
  	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
  		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
  }
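  /*
   * Groups are sorted on the group service tree by vdisktime relative to the
   * tree's min_vdisktime; the insert below also maintains the cached
   * leftmost node (st->left) used for O(1) minimum lookup.
   */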
  static inline s64
  cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
  	return cfqg->vdisktime - st->min_vdisktime;
  }
  
  static void
  __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
  	struct rb_node **node = &st->rb.rb_node;
  	struct rb_node *parent = NULL;
  	struct cfq_group *__cfqg;
  	s64 key = cfqg_key(st, cfqg);
  	int left = 1;
  
  	while (*node != NULL) {
  		parent = *node;
  		__cfqg = rb_entry_cfqg(parent);
  
  		if (key < cfqg_key(st, __cfqg))
  			node = &parent->rb_left;
  		else {
  			node = &parent->rb_right;
  			left = 0;
  		}
  	}
  
  	if (left)
  		st->left = &cfqg->rb_node;
  
  	rb_link_node(&cfqg->rb_node, parent, node);
  	rb_insert_color(&cfqg->rb_node, &st->rb);
  }
  
  static void
  cfq_update_group_weight(struct cfq_group *cfqg)
  {
  	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
  	if (cfqg->needs_update) {
  		cfqg->weight = cfqg->new_weight;
  		cfqg->needs_update = false;
  	}
  }
  
  static void
  cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
  	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
  
  	cfq_update_group_weight(cfqg);
  	__cfq_group_service_tree_add(st, cfqg);
  	st->total_weight += cfqg->weight;
  }
  
  static void
  cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  	struct cfq_group *__cfqg;
  	struct rb_node *n;
  
  	cfqg->nr_cfqq++;
  	if (!RB_EMPTY_NODE(&cfqg->rb_node))
  		return;
  
  	/*
  	 * Currently put the group at the end. Later implement something
  	 * so that groups get lesser vtime based on their weights, so that
  	 * a group does not lose its entire share if it was not continuously backlogged.
  	 */
  	n = rb_last(&st->rb);
  	if (n) {
  		__cfqg = rb_entry_cfqg(n);
  		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
  	} else
  		cfqg->vdisktime = st->min_vdisktime;
  	cfq_group_service_tree_add(st, cfqg);
  }

  static void
  cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
  	st->total_weight -= cfqg->weight;
  	if (!RB_EMPTY_NODE(&cfqg->rb_node))
  		cfq_rb_erase(&cfqg->rb_node, st);
  }
  
  static void
  cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  
  	BUG_ON(cfqg->nr_cfqq < 1);
  	cfqg->nr_cfqq--;
  
  	/* If there are other cfq queues under this group, don't delete it */
  	if (cfqg->nr_cfqq)
  		return;
  	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
  	cfq_group_service_tree_del(st, cfqg);
  	cfqg->saved_workload_slice = 0;
  	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
  }
  static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
  						unsigned int *unaccounted_time)
  {
  	unsigned int slice_used;
  
  	/*
  	 * Queue got expired before even a single request completed or
  	 * got expired immediately after first request completion.
  	 */
  	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
  		/*
  		 * Also charge the seek time incurred to the group, otherwise
  		 * if there are multiple queues in the group, each can dispatch
  		 * a single request on seeky media and cause lots of seek time
  		 * and group will never know it.
  		 */
  		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
  					1);
  	} else {
  		slice_used = jiffies - cfqq->slice_start;
  		if (slice_used > cfqq->allocated_slice) {
  			*unaccounted_time = slice_used - cfqq->allocated_slice;
  			slice_used = cfqq->allocated_slice;
  		}
  		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
  			*unaccounted_time += cfqq->slice_start -
  					cfqq->dispatch_start;
  	}
  	return slice_used;
  }
  
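  /*
   * Charge the just-expired queue's usage to its group: the charge is the
   * number of dispatched requests in iops_mode(), otherwise the time slice
   * actually used. The group's vdisktime is advanced accordingly, the group
   * is repositioned on the service tree, and the current workload context is
   * saved so it can be restored when the group is scheduled again.
   */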
  static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
  				struct cfq_queue *cfqq)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  	unsigned int used_sl, charge, unaccounted_sl = 0;
  	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
  			- cfqg->service_tree_idle.count;
  
  	BUG_ON(nr_sync < 0);
  	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
  
  	if (iops_mode(cfqd))
  		charge = cfqq->slice_dispatch;
  	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
  		charge = cfqq->allocated_slice;
  
  	/* Can't update vdisktime while group is on service tree */
  	cfq_group_service_tree_del(st, cfqg);
  	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
  	/* If a new weight was requested, update now, off tree */
  	cfq_group_service_tree_add(st, cfqg);
  
  	/* This group is being expired. Save the context */
  	if (time_after(cfqd->workload_expires, jiffies)) {
  		cfqg->saved_workload_slice = cfqd->workload_expires
  						- jiffies;
  		cfqg->saved_workload = cfqd->serving_type;
  		cfqg->saved_serving_prio = cfqd->serving_prio;
  	} else
  		cfqg->saved_workload_slice = 0;
  
  	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
  					st->min_vdisktime);
  	cfq_log_cfqq(cfqq->cfqd, cfqq,
  		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
  		     used_sl, cfqq->slice_dispatch, charge,
  		     iops_mode(cfqd), cfqq->nr_sectors);
  	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
  					  unaccounted_sl);
  	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
  }
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
  {
  	if (blkg)
  		return container_of(blkg, struct cfq_group, blkg);
  	return NULL;
  }
  static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
  					  unsigned int weight)
  {
  	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
  	cfqg->new_weight = weight;
  	cfqg->needs_update = true;
  }
  static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
  			struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
  {
  	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
  	unsigned int major, minor;
  
  	/*
  	 * Add group onto cgroup list. It might happen that bdi->dev is
  	 * not initialized yet. Initialize this new group without major
  	 * and minor info and this info will be filled in once a new thread
  	 * comes for IO.
  	 */
  	if (bdi->dev) {
  		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
  		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
  					(void *)cfqd, MKDEV(major, minor));
  	} else
  		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
  					(void *)cfqd, 0);
  
  	cfqd->nr_blkcg_linked_grps++;
  	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
  
  	/* Add group on cfqd list */
  	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
  }
  
  /*
   * Should be called from sleepable context. No request queue lock is held, as
   * per-cpu stats are allocated dynamically and alloc_percpu() needs to be called
   * from sleepable context.
   */
  static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
  {
  	struct cfq_group *cfqg = NULL;
  	int i, j, ret;
  	struct cfq_rb_root *st;
  
  	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
  	if (!cfqg)
  		return NULL;
  
  	for_each_cfqg_st(cfqg, i, j, st)
  		*st = CFQ_RB_ROOT;
  	RB_CLEAR_NODE(&cfqg->rb_node);
  	cfqg->ttime.last_end_request = jiffies;
  	/*
  	 * Take the initial reference that will be released on destroy
  	 * This can be thought of a joint reference by cgroup and
  	 * elevator which will be dropped by either elevator exit
  	 * or cgroup deletion path depending on who is exiting first.
  	 */
  	cfqg->ref = 1;
  
  	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
  	if (ret) {
  		kfree(cfqg);
  		return NULL;
  	}
  	return cfqg;
  }
  
  static struct cfq_group *
  cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
  {
  	struct cfq_group *cfqg = NULL;
  	void *key = cfqd;
  	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
  	unsigned int major, minor;

  	/*
  	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
180be2a04   Vivek Goyal   cfq-iosched: fix ...
1055
  	 */
  	if (blkcg == &blkio_root_cgroup)
  		cfqg = &cfqd->root_group;
  	else
  		cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1060

  	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
  		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
  		cfqg->blkg.dev = MKDEV(major, minor);
  	}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1065

  	return cfqg;
  }
  
  /*
 * Search for the cfq group the current task belongs to. request_queue lock must
   * be held.
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1072
   */
3e59cf9d6   Vivek Goyal   cfq-iosched: Get ...
1073
  static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1074
  {
70087dc38   Vivek Goyal   blk-throttle: Use...
1075
  	struct blkio_cgroup *blkcg;
  	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
  	struct request_queue *q = cfqd->queue;
  
  	rcu_read_lock();
70087dc38   Vivek Goyal   blk-throttle: Use...
1080
  	blkcg = task_blkio_cgroup(current);
  	cfqg = cfq_find_cfqg(cfqd, blkcg);
  	if (cfqg) {
  		rcu_read_unlock();
  		return cfqg;
  	}
  
  	/*
  	 * Need to allocate a group. Allocation of group also needs allocation
	 * of per cpu stats which in turn takes a mutex and can block. Hence
  	 * we need to drop rcu lock and queue_lock before we call alloc.
  	 *
  	 * Not taking any queue reference here and assuming that queue is
  	 * around by the time we return. CFQ queue allocation code does
  	 * the same. It might be racy though.
  	 */
  
  	rcu_read_unlock();
  	spin_unlock_irq(q->queue_lock);
  
  	cfqg = cfq_alloc_cfqg(cfqd);
  
  	spin_lock_irq(q->queue_lock);
  
  	rcu_read_lock();
  	blkcg = task_blkio_cgroup(current);
  
  	/*
  	 * If some other thread already allocated the group while we were
  	 * not holding queue lock, free up the group
  	 */
  	__cfqg = cfq_find_cfqg(cfqd, blkcg);
  
  	if (__cfqg) {
  		kfree(cfqg);
  		rcu_read_unlock();
  		return __cfqg;
  	}
3e59cf9d6   Vivek Goyal   cfq-iosched: Get ...
1118
  	if (!cfqg)
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1119
  		cfqg = &cfqd->root_group;
  
  	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
  	rcu_read_unlock();
  	return cfqg;
  }
  static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
  {
329a67815   Shaohua Li   block cfq: don't ...
1127
  	cfqg->ref++;
  	return cfqg;
  }
  static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
  {
  	/* Currently, all async queues are mapped to root group */
  	if (!cfq_cfqq_sync(cfqq))
  		cfqg = &cfqq->cfqd->root_group;
  
  	cfqq->cfqg = cfqg;
b1c357696   Vivek Goyal   blkio: Take care ...
1137
  	/* cfqq reference on cfqg */
329a67815   Shaohua Li   block cfq: don't ...
1138
  	cfqq->cfqg->ref++;
  }
  
  static void cfq_put_cfqg(struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st;
  	int i, j;
  	BUG_ON(cfqg->ref <= 0);
  	cfqg->ref--;
  	if (cfqg->ref)
  		return;
  	for_each_cfqg_st(cfqg, i, j, st)
b54ce60eb   Gui Jianfeng   cfq-iosched: Get ...
1150
  		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
5624a4e44   Vivek Goyal   blk-throttle: Mak...
1151
  	free_percpu(cfqg->blkg.stats_cpu);
  	kfree(cfqg);
  }
  
  static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	/* Something wrong if we are trying to remove same group twice */
  	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
  
  	hlist_del_init(&cfqg->cfqd_node);
  	BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
  	cfqd->nr_blkcg_linked_grps--;
  	/*
  	 * Put the reference taken at the time of creation so that when all
  	 * queues are gone, group can be destroyed.
  	 */
  	cfq_put_cfqg(cfqg);
  }
  
  static void cfq_release_cfq_groups(struct cfq_data *cfqd)
  {
  	struct hlist_node *pos, *n;
  	struct cfq_group *cfqg;
  
  	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
  		/*
  		 * If cgroup removal path got to blk_group first and removed
  		 * it from cgroup list, then it will take care of destroying
  		 * cfqg also.
  		 */
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1181
  		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
  			cfq_destroy_cfqg(cfqd, cfqg);
  	}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1184
  }
  
  /*
 * Blk cgroup controller notification saying that the blkio_group object is
 * being delinked as the associated cgroup object is going away. That also
 * means that no new IO will come in this group. So get rid of this group as
 * soon as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). "key" is the rcu protected
 * pointer. That means "key" is a valid cfq_data pointer as long as we are
 * under rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the elevator was exiting, the cgroup
 * deletion path got to it first.
   */
8aea45451   Paul Bolle   CFQ: make two fun...
1200
  static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
  {
  	unsigned long  flags;
  	struct cfq_data *cfqd = key;
  
  	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
  	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  }
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1209
  #else /* GROUP_IOSCHED */
3e59cf9d6   Vivek Goyal   cfq-iosched: Get ...
1210
  static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
  {
  	return &cfqd->root_group;
  }
  
  static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
  {
50eaeb323   Dmitry Monakhov   cfq-iosched: fix ...
1217
  	return cfqg;
7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
1218
  }
  static inline void
  cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
  	cfqq->cfqg = cfqg;
  }
  static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
  static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1225
  #endif /* GROUP_IOSCHED */
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
1226
  /*
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
1227
   * The cfqd->service_trees holds all pending cfq_queue's that have
   * requests waiting to be processed. It is sorted in the order that
   * we will service the queues.
   */
a36e71f99   Jens Axboe   cfq-iosched: add ...
1231
  static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a5   Jens Axboe   cfq-iosched: appl...
1232
  				 bool add_front)
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1233
  {
  	struct rb_node **p, *parent;
  	struct cfq_queue *__cfqq;
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1236
  	unsigned long rb_key;
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
1237
  	struct cfq_rb_root *service_tree;
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
1238
  	int left;
dae739ebc   Vivek Goyal   blkio: Group time...
1239
  	int new_cfqq = 1;
ae30c2865   Vivek Goyal   blkio: Implement ...
1240

cdb16e8f7   Vivek Goyal   blkio: Introduce ...
1241
  	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
65b32a573   Vivek Goyal   cfq-iosched: Remo...
1242
  						cfqq_type(cfqq));
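	/*
	 * Idle class queues are always placed at the back of the service
	 * tree: CFQ_IDLE_DELAY past the last queued entry (or past now).
	 */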
  	if (cfq_class_idle(cfqq)) {
  		rb_key = CFQ_IDLE_DELAY;
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
1245
  		parent = rb_last(&service_tree->rb);
  		if (parent && parent != &cfqq->rb_node) {
  			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
  			rb_key += __cfqq->rb_key;
  		} else
  			rb_key += jiffies;
  	} else if (!add_front) {
  		/*
  		 * Get our rb key offset. Subtract any residual slice
  		 * value carried from last service. A negative resid
  		 * count indicates slice overrun, and this should position
  		 * the next service time further away in the tree.
  		 */
edd75ffd9   Jens Axboe   cfq-iosched: get ...
1258
  		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
b9c8946b1   Jens Axboe   cfq-iosched: fix ...
1259
  		rb_key -= cfqq->slice_resid;
edd75ffd9   Jens Axboe   cfq-iosched: get ...
1260
  		cfqq->slice_resid = 0;
  	} else {
  		rb_key = -HZ;
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
1263
  		__cfqq = cfq_rb_first(service_tree);
  		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1266

d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1267
  	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
dae739ebc   Vivek Goyal   blkio: Group time...
1268
  		new_cfqq = 0;
99f9628ab   Jens Axboe   [PATCH] cfq-iosch...
1269
  		/*
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1270
  		 * same position, nothing more to do
99f9628ab   Jens Axboe   [PATCH] cfq-iosch...
1271
  		 */
  		if (rb_key == cfqq->rb_key &&
  		    cfqq->service_tree == service_tree)
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1274
  			return;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1275

  		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
  		cfqq->service_tree = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1278
  	}
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1279

498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
1280
  	left = 1;
0871714e0   Jens Axboe   cfq-iosched: rela...
1281
  	parent = NULL;
  	cfqq->service_tree = service_tree;
  	p = &service_tree->rb.rb_node;
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1284
  	while (*p) {
67060e379   Jens Axboe   cfq-iosched: sort...
1285
  		struct rb_node **n;
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
1286

  		parent = *p;
  		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
0c534e0a4   Jens Axboe   cfq-iosched: sort...
1289
  		/*
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
1290
  		 * sort by key, that represents service time.
0c534e0a4   Jens Axboe   cfq-iosched: sort...
1291
  		 */
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
1292
  		if (time_before(rb_key, __cfqq->rb_key))
67060e379   Jens Axboe   cfq-iosched: sort...
1293
  			n = &(*p)->rb_left;
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
1294
  		else {
67060e379   Jens Axboe   cfq-iosched: sort...
1295
  			n = &(*p)->rb_right;
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
1296
  			left = 0;
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
1297
  		}
  
  		p = n;
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1300
  	}
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
1301
  	if (left)
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
1302
  		service_tree->left = &cfqq->rb_node;
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
1303

  	cfqq->rb_key = rb_key;
  	rb_link_node(&cfqq->rb_node, parent, p);
  	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
  	service_tree->count++;
20359f27e   Namhyung Kim   cfq-iosched: remo...
1308
  	if (add_front || !new_cfqq)
dae739ebc   Vivek Goyal   blkio: Group time...
1309
  		return;
8184f93ec   Justin TerAvest   cfq-iosched: Don'...
1310
  	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1311
  }
a36e71f99   Jens Axboe   cfq-iosched: add ...
1312
  static struct cfq_queue *
  cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
  		     sector_t sector, struct rb_node **ret_parent,
  		     struct rb_node ***rb_link)
a36e71f99   Jens Axboe   cfq-iosched: add ...
1316
  {
  	struct rb_node **p, *parent;
  	struct cfq_queue *cfqq = NULL;
  
  	parent = NULL;
  	p = &root->rb_node;
  	while (*p) {
  		struct rb_node **n;
  
  		parent = *p;
  		cfqq = rb_entry(parent, struct cfq_queue, p_node);
  
  		/*
  		 * Sort strictly based on sector.  Smallest to the left,
  		 * largest to the right.
  		 */
2e46e8b27   Tejun Heo   block: drop reque...
1332
  		if (sector > blk_rq_pos(cfqq->next_rq))
a36e71f99   Jens Axboe   cfq-iosched: add ...
1333
  			n = &(*p)->rb_right;
2e46e8b27   Tejun Heo   block: drop reque...
1334
  		else if (sector < blk_rq_pos(cfqq->next_rq))
  			n = &(*p)->rb_left;
  		else
  			break;
  		p = n;
3ac6c9f8a   Jens Axboe   cfq-iosched: fix ...
1339
  		cfqq = NULL;
  	}
  
  	*ret_parent = parent;
  	if (rb_link)
  		*rb_link = p;
3ac6c9f8a   Jens Axboe   cfq-iosched: fix ...
1345
  	return cfqq;
  }
  
  static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	struct rb_node **p, *parent;
  	struct cfq_queue *__cfqq;
  	if (cfqq->p_root) {
  		rb_erase(&cfqq->p_node, cfqq->p_root);
  		cfqq->p_root = NULL;
  	}
  
  	if (cfq_class_idle(cfqq))
  		return;
  	if (!cfqq->next_rq)
  		return;
f2d1f0ae7   Jens Axboe   cfq-iosched: cach...
1361
  	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
  	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
  				      blk_rq_pos(cfqq->next_rq), &parent, &p);
  	if (!__cfqq) {
  		rb_link_node(&cfqq->p_node, parent, p);
  		rb_insert_color(&cfqq->p_node, cfqq->p_root);
  	} else
  		cfqq->p_root = NULL;
a36e71f99   Jens Axboe   cfq-iosched: add ...
1369
  }
  /*
   * Update cfqq's position in the service tree.
   */
edd75ffd9   Jens Axboe   cfq-iosched: get ...
1373
  static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
6d048f531   Jens Axboe   cfq-iosched: deve...
1374
  {
  	/*
  	 * Resorting requires the cfqq to be on the RR list already.
  	 */
a36e71f99   Jens Axboe   cfq-iosched: add ...
1378
  	if (cfq_cfqq_on_rr(cfqq)) {
edd75ffd9   Jens Axboe   cfq-iosched: get ...
1379
  		cfq_service_tree_add(cfqd, cfqq, 0);
  		cfq_prio_tree_add(cfqd, cfqq);
  	}
6d048f531   Jens Axboe   cfq-iosched: deve...
1382
  }
  /*
   * add to busy list of queues for service, trying to be fair in ordering
22e2c507c   Jens Axboe   [PATCH] Update cf...
1385
   * the pending list according to last request service
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1386
   */
febffd618   Jens Axboe   cfq-iosched: kill...
1387
  static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1388
  {
7b679138b   Jens Axboe   cfq-iosched: add ...
1389
  	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
  	BUG_ON(cfq_cfqq_on_rr(cfqq));
  	cfq_mark_cfqq_on_rr(cfqq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1392
  	cfqd->busy_queues++;
  	if (cfq_cfqq_sync(cfqq))
  		cfqd->busy_sync_queues++;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1395

edd75ffd9   Jens Axboe   cfq-iosched: get ...
1396
  	cfq_resort_rr_list(cfqd, cfqq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1397
  }
  /*
   * Called when the cfqq no longer has requests pending, remove it from
   * the service tree.
   */
febffd618   Jens Axboe   cfq-iosched: kill...
1402
  static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1403
  {
7b679138b   Jens Axboe   cfq-iosched: add ...
1404
  	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
  	BUG_ON(!cfq_cfqq_on_rr(cfqq));
  	cfq_clear_cfqq_on_rr(cfqq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1407

  	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
  		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
  		cfqq->service_tree = NULL;
  	}
  	if (cfqq->p_root) {
  		rb_erase(&cfqq->p_node, cfqq->p_root);
  		cfqq->p_root = NULL;
  	}
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1416

8184f93ec   Justin TerAvest   cfq-iosched: Don'...
1417
  	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
  	BUG_ON(!cfqd->busy_queues);
  	cfqd->busy_queues--;
  	if (cfq_cfqq_sync(cfqq))
  		cfqd->busy_sync_queues--;
  }
  
  /*
   * rb tree support functions
   */
febffd618   Jens Axboe   cfq-iosched: kill...
1427
  static void cfq_del_rq_rb(struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1428
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1429
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1430
  	const int sync = rq_is_sync(rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1431

  	BUG_ON(!cfqq->queued[sync]);
  	cfqq->queued[sync]--;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1434

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1435
  	elv_rb_del(&cfqq->sort_list, rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1436

  	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
  		/*
  		 * Queue will be deleted from service tree when we actually
  		 * expire it later. Right now just remove it from prio tree
  		 * as it is empty.
  		 */
  		if (cfqq->p_root) {
  			rb_erase(&cfqq->p_node, cfqq->p_root);
  			cfqq->p_root = NULL;
  		}
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1448
  }
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1449
  static void cfq_add_rq_rb(struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1450
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1451
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1452
  	struct cfq_data *cfqd = cfqq->cfqd;
796d5116c   Jeff Moyer   iosched: prevent ...
1453
  	struct request *prev;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1454

5380a101d   Jens Axboe   [PATCH] cfq-iosch...
1455
  	cfqq->queued[rq_is_sync(rq)]++;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1456

796d5116c   Jeff Moyer   iosched: prevent ...
1457
  	elv_rb_add(&cfqq->sort_list, rq);
  
  	if (!cfq_cfqq_on_rr(cfqq))
  		cfq_add_cfqq_rr(cfqd, cfqq);
  
  	/*
  	 * check if this request is a better next-serve candidate
  	 */
a36e71f99   Jens Axboe   cfq-iosched: add ...
1465
  	prev = cfqq->next_rq;
cf7c25cf9   Corrado Zoccolo   cfq-iosched: fix ...
1466
  	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
  
  	/*
  	 * adjust priority tree position, if ->next_rq changes
  	 */
  	if (prev != cfqq->next_rq)
  		cfq_prio_tree_add(cfqd, cfqq);
5044eed48   Jens Axboe   cfq-iosched: fix ...
1473
  	BUG_ON(!cfqq->next_rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1474
  }
febffd618   Jens Axboe   cfq-iosched: kill...
1475
  static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1476
  {
  	elv_rb_del(&cfqq->sort_list, rq);
  	cfqq->queued[rq_is_sync(rq)]--;
  	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
  					rq_data_dir(rq), rq_is_sync(rq));
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1481
  	cfq_add_rq_rb(rq);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1482
  	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
  			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
  			rq_is_sync(rq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1485
  }
  static struct request *
  cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1488
  {
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1489
  	struct task_struct *tsk = current;
c58698073   Tejun Heo   block, cfq: reorg...
1490
  	struct cfq_io_cq *cic;
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1491
  	struct cfq_queue *cfqq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1492

4ac845a2e   Jens Axboe   block: cfq: make ...
1493
  	cic = cfq_cic_lookup(cfqd, tsk->io_context);
  	if (!cic)
  		return NULL;
  
  	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
  	if (cfqq) {
  		sector_t sector = bio->bi_sector + bio_sectors(bio);
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
1500
  		return elv_rb_find(&cfqq->sort_list, sector);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
1501
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1502

  	return NULL;
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1505
  static void cfq_activate_request(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1506
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
1507
  	struct cfq_data *cfqd = q->elevator->elevator_data;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
1508

53c583d22   Corrado Zoccolo   cfq-iosched: requ...
1509
  	cfqd->rq_in_driver++;
7b679138b   Jens Axboe   cfq-iosched: add ...
1510
  	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
1511
  						cfqd->rq_in_driver);
25776e359   Jens Axboe   [PATCH] cfq-iosch...
1512

5b93629b4   Tejun Heo   block: implement ...
1513
  	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1514
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1515
  static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1516
  {
b4878f245   Jens Axboe   [PATCH] 02/05: up...
1517
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	WARN_ON(!cfqd->rq_in_driver);
  	cfqd->rq_in_driver--;
7b679138b   Jens Axboe   cfq-iosched: add ...
1520
  	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
1521
  						cfqd->rq_in_driver);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1522
  }
b4878f245   Jens Axboe   [PATCH] 02/05: up...
1523
  static void cfq_remove_request(struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1524
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1525
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
1526

  	if (cfqq->next_rq == rq)
  		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1529

b4878f245   Jens Axboe   [PATCH] 02/05: up...
1530
  	list_del_init(&rq->queuelist);
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1531
  	cfq_del_rq_rb(rq);
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
1532

45333d5a3   Aaron Carroll   cfq-iosched: fix ...
1533
  	cfqq->cfqd->rq_queued--;
  	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
  					rq_data_dir(rq), rq_is_sync(rq));
  	if (rq->cmd_flags & REQ_PRIO) {
  		WARN_ON(!cfqq->prio_pending);
  		cfqq->prio_pending--;
b53d1ed73   Jens Axboe   Revert "cfq: Remo...
1539
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1540
  }
  static int cfq_merge(struct request_queue *q, struct request **req,
  		     struct bio *bio)
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	struct request *__rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1546

206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1547
  	__rq = cfq_find_rq_fmerge(cfqd, bio);
22e2c507c   Jens Axboe   [PATCH] Update cf...
1548
  	if (__rq && elv_rq_merge_ok(__rq, bio)) {
  		*req = __rq;
  		return ELEVATOR_FRONT_MERGE;
  	}
  
  	return ELEVATOR_NO_MERGE;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1554
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1555
  static void cfq_merged_request(struct request_queue *q, struct request *req,
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
1556
  			       int type)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1557
  {
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
1558
  	if (type == ELEVATOR_FRONT_MERGE) {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1559
  		struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1560

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1561
  		cfq_reposition_rq_rb(cfqq, req);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1562
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1563
  }
  static void cfq_bio_merged(struct request_queue *q, struct request *req,
  				struct bio *bio)
  {
  	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
  					bio_data_dir(bio), cfq_bio_sync(bio));
812d40264   Divyesh Shah   blkio: Add io_mer...
1569
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1570
  static void
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1571
  cfq_merged_requests(struct request_queue *q, struct request *rq,
  		    struct request *next)
  {
cf7c25cf9   Corrado Zoccolo   cfq-iosched: fix ...
1574
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4a0b75c7d   Shaohua Li   block, cfq: fix e...
1575
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	/*
  	 * reposition in fifo if next is older than rq
  	 */
  	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
30996f40b   Jens Axboe   cfq-iosched: fix ...
1580
  	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
22e2c507c   Jens Axboe   [PATCH] Update cf...
1581
  		list_move(&rq->queuelist, &next->queuelist);
  		rq_set_fifo_time(rq, rq_fifo_time(next));
  	}
22e2c507c   Jens Axboe   [PATCH] Update cf...
1584

  	if (cfqq->next_rq == next)
  		cfqq->next_rq = rq;
b4878f245   Jens Axboe   [PATCH] 02/05: up...
1587
  	cfq_remove_request(next);
  	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
  					rq_data_dir(next), rq_is_sync(next));
  
  	cfqq = RQ_CFQQ(next);
  	/*
	 * all requests of this queue have been merged into other queues; delete
	 * it from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or to idle
  	 */
  	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
  	    cfqq != cfqd->active_queue)
  		cfq_del_cfqq_rr(cfqd, cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
1600
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1601
  static int cfq_allow_merge(struct request_queue *q, struct request *rq,
  			   struct bio *bio)
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
c58698073   Tejun Heo   block, cfq: reorg...
1605
  	struct cfq_io_cq *cic;
da7752650   Jens Axboe   [PATCH] cfq-iosch...
1606
  	struct cfq_queue *cfqq;
  
  	/*
ec8acb690   Jens Axboe   [PATCH] cfq-iosch...
1609
  	 * Disallow merge of a sync bio into an async request.
da7752650   Jens Axboe   [PATCH] cfq-iosch...
1610
  	 */
91fac317a   Vasily Tarasov   cfq-iosched: get ...
1611
  	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
1612
  		return false;
  
  	/*
  	 * Lookup the cfqq that this bio will be queued with and allow
  	 * merge only if rq is queued there.  This function can be called
  	 * from plug merge without queue_lock.  In such cases, ioc of @rq
  	 * and %current are guaranteed to be equal.  Avoid lookup which
  	 * requires queue_lock by using @rq's cic.
  	 */
c58698073   Tejun Heo   block, cfq: reorg...
1621
  	if (current->io_context == RQ_CIC(rq)->icq.ioc) {
  		cic = RQ_CIC(rq);
  	} else {
  		cic = cfq_cic_lookup(cfqd, current->io_context);
  		if (!cic)
  			return false;
  	}
719d34027   Jens Axboe   [PATCH] cfq-iosch...
1628

91fac317a   Vasily Tarasov   cfq-iosched: get ...
1629
  	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
a6151c3a5   Jens Axboe   cfq-iosched: appl...
1630
  	return cfqq == RQ_CFQQ(rq);
da7752650   Jens Axboe   [PATCH] cfq-iosch...
1631
  }
  static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	del_timer(&cfqd->idle_slice_timer);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1635
  	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
812df48d1   Divyesh Shah   blkio: Add more d...
1636
  }
  static void __cfq_set_active_queue(struct cfq_data *cfqd,
  				   struct cfq_queue *cfqq)
  {
  	if (cfqq) {
  		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
  				cfqd->serving_prio, cfqd->serving_type);
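		/* reset per-slice accounting and state for the newly activated queue */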
  		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
  		cfqq->slice_start = 0;
  		cfqq->dispatch_start = jiffies;
  		cfqq->allocated_slice = 0;
  		cfqq->slice_end = 0;
  		cfqq->slice_dispatch = 0;
  		cfqq->nr_sectors = 0;
  
  		cfq_clear_cfqq_wait_request(cfqq);
  		cfq_clear_cfqq_must_dispatch(cfqq);
  		cfq_clear_cfqq_must_alloc_slice(cfqq);
  		cfq_clear_cfqq_fifo_expire(cfqq);
  		cfq_mark_cfqq_slice_new(cfqq);
  
  		cfq_del_timer(cfqd, cfqq);
  	}
  
  	cfqd->active_queue = cfqq;
  }
  
  /*
   * current cfqq expired its slice (or was too idle), select new one
   */
  static void
  __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
1668
  		    bool timed_out)
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1669
  {
7b679138b   Jens Axboe   cfq-iosched: add ...
1670
  	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1671
  	if (cfq_cfqq_wait_request(cfqq))
812df48d1   Divyesh Shah   blkio: Add more d...
1672
  		cfq_del_timer(cfqd, cfqq);
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1673

7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1674
  	cfq_clear_cfqq_wait_request(cfqq);
f75edf2dc   Vivek Goyal   blkio: Wait for c...
1675
  	cfq_clear_cfqq_wait_busy(cfqq);
  
  	/*
  	 * If this cfqq is shared between multiple processes, check to
  	 * make sure that those processes are still issuing I/Os within
  	 * the mean seek distance.  If not, it may be time to break the
  	 * queues apart again.
  	 */
  	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
  		cfq_mark_cfqq_split_coop(cfqq);
  
  	/*
6084cdda0   Jens Axboe   cfq-iosched: don'...
1687
  	 * store what was left of this slice, if the queue idled/timed out
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1688
  	 */
  	if (timed_out) {
  		if (cfq_cfqq_slice_new(cfqq))
ba5bd520f   Vivek Goyal   cfq: rename a fun...
1691
  			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
  		else
  			cfqq->slice_resid = cfqq->slice_end - jiffies;
  		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
  	}
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1696

e5ff082e8   Vivek Goyal   blkio: Fix anothe...
1697
  	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739ebc   Vivek Goyal   blkio: Group time...
1698

  	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
  		cfq_del_cfqq_rr(cfqd, cfqq);
edd75ffd9   Jens Axboe   cfq-iosched: get ...
1701
  	cfq_resort_rr_list(cfqd, cfqq);
  
  	if (cfqq == cfqd->active_queue)
  		cfqd->active_queue = NULL;
  
  	if (cfqd->active_cic) {
c58698073   Tejun Heo   block, cfq: reorg...
1707
  		put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
  		cfqd->active_cic = NULL;
  	}
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1710
  }
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
1711
  static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
  {
  	struct cfq_queue *cfqq = cfqd->active_queue;
  
  	if (cfqq)
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
1716
  		__cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1717
  }
  /*
   * Get next queue for service. Unless we have a queue preemption,
   * we'll simply select the first cfqq in the service tree.
   */
6d048f531   Jens Axboe   cfq-iosched: deve...
1722
  static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507c   Jens Axboe   [PATCH] Update cf...
1723
  {
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
1724
  	struct cfq_rb_root *service_tree =
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
1725
  		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
65b32a573   Vivek Goyal   cfq-iosched: Remo...
1726
  					cfqd->serving_type);
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1727

  	if (!cfqd->rq_queued)
  		return NULL;
  	/* There is nothing to dispatch */
  	if (!service_tree)
  		return NULL;
  	if (RB_EMPTY_ROOT(&service_tree->rb))
  		return NULL;
  	return cfq_rb_first(service_tree);
6d048f531   Jens Axboe   cfq-iosched: deve...
1736
  }
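
/*
 * Pick any pending cfqq from the next group's service trees, bypassing the
 * normal workload selection.
 */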
  static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
  {
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1739
  	struct cfq_group *cfqg;
  	struct cfq_queue *cfqq;
  	int i, j;
  	struct cfq_rb_root *st;
  
  	if (!cfqd->rq_queued)
  		return NULL;
  	cfqg = cfq_get_next_cfqg(cfqd);
  	if (!cfqg)
  		return NULL;
  	for_each_cfqg_st(cfqg, i, j, st)
  		if ((cfqq = cfq_rb_first(st)) != NULL)
  			return cfqq;
  	return NULL;
  }
  /*
   * Get and set a new active queue for service.
   */
  static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
  					      struct cfq_queue *cfqq)
6d048f531   Jens Axboe   cfq-iosched: deve...
1759
  {
e00ef7997   Jens Axboe   cfq-iosched: get ...
1760
  	if (!cfqq)
a36e71f99   Jens Axboe   cfq-iosched: add ...
1761
  		cfqq = cfq_get_next_queue(cfqd);
6d048f531   Jens Axboe   cfq-iosched: deve...
1762

22e2c507c   Jens Axboe   [PATCH] Update cf...
1763
  	__cfq_set_active_queue(cfqd, cfqq);
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
1764
  	return cfqq;
22e2c507c   Jens Axboe   [PATCH] Update cf...
1765
  }
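
/* absolute sector distance between @rq and the last dispatched position */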
  static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
  					  struct request *rq)
  {
  	if (blk_rq_pos(rq) >= cfqd->last_position)
  		return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1771
  	else
83096ebf1   Tejun Heo   block: convert to...
1772
  		return cfqd->last_position - blk_rq_pos(rq);
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1773
  }
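
/* a request is "close" if it lies within CFQQ_CLOSE_THR of the last position */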
  static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e9ce335df   Shaohua Li   cfq-iosched: fix ...
1775
  			       struct request *rq)
6d048f531   Jens Axboe   cfq-iosched: deve...
1776
  {
e9ce335df   Shaohua Li   cfq-iosched: fix ...
1777
  	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
6d048f531   Jens Axboe   cfq-iosched: deve...
1778
  }
  static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  				    struct cfq_queue *cur_cfqq)
  {
f2d1f0ae7   Jens Axboe   cfq-iosched: cach...
1782
  	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
  	struct rb_node *parent, *node;
  	struct cfq_queue *__cfqq;
  	sector_t sector = cfqd->last_position;
  
  	if (RB_EMPTY_ROOT(root))
  		return NULL;
  
  	/*
  	 * First, if we find a request starting at the end of the last
  	 * request, choose it.
  	 */
f2d1f0ae7   Jens Axboe   cfq-iosched: cach...
1794
  	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
  	if (__cfqq)
  		return __cfqq;
  
  	/*
  	 * If the exact sector wasn't found, the parent of the NULL leaf
  	 * will contain the closest sector.
  	 */
  	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
e9ce335df   Shaohua Li   cfq-iosched: fix ...
1803
  	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f99   Jens Axboe   cfq-iosched: add ...
1804
  		return __cfqq;
2e46e8b27   Tejun Heo   block: drop reque...
1805
  	if (blk_rq_pos(__cfqq->next_rq) < sector)
  		node = rb_next(&__cfqq->p_node);
  	else
  		node = rb_prev(&__cfqq->p_node);
  	if (!node)
  		return NULL;
  
  	__cfqq = rb_entry(node, struct cfq_queue, p_node);
e9ce335df   Shaohua Li   cfq-iosched: fix ...
1813
  	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
  		return __cfqq;
  
  	return NULL;
  }
  
  /*
   * cfqd - obvious
   * cur_cfqq - passed in so that we don't decide that the current queue is
   * 	      closely cooperating with itself.
   *
 * So, basically we're assuming that cur_cfqq has dispatched at least
   * one request, and that cfqd->last_position reflects a position on the disk
   * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
   * assumption.
   */
  static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d0408   Jeff Moyer   cfq: change the m...
1830
  					      struct cfq_queue *cur_cfqq)
6d048f531   Jens Axboe   cfq-iosched: deve...
1831
  {
a36e71f99   Jens Axboe   cfq-iosched: add ...
1832
  	struct cfq_queue *cfqq;
  	if (cfq_class_idle(cur_cfqq))
  		return NULL;
  	if (!cfq_cfqq_sync(cur_cfqq))
  		return NULL;
  	if (CFQQ_SEEKY(cur_cfqq))
  		return NULL;
a36e71f99   Jens Axboe   cfq-iosched: add ...
1839
  	/*
  	 * Don't search priority tree if it's the only queue in the group.
  	 */
  	if (cur_cfqq->cfqg->nr_cfqq == 1)
  		return NULL;
  
  	/*
	 * We should notice if some of the queues are cooperating, e.g.
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and not waste time idling.
6d048f531   Jens Axboe   cfq-iosched: deve...
1849
  	 */
  	cfqq = cfqq_close(cfqd, cur_cfqq);
  	if (!cfqq)
  		return NULL;
  	/* If new queue belongs to different cfq_group, don't choose it */
  	if (cur_cfqq->cfqg != cfqq->cfqg)
  		return NULL;
  	/*
  	 * It only makes sense to merge sync queues.
  	 */
  	if (!cfq_cfqq_sync(cfqq))
  		return NULL;
  	if (CFQQ_SEEKY(cfqq))
  		return NULL;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
1863

  	/*
  	 * Do not merge queues of different priority classes
  	 */
  	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
  		return NULL;
a36e71f99   Jens Axboe   cfq-iosched: add ...
1869
  	return cfqq;
6d048f531   Jens Axboe   cfq-iosched: deve...
1870
  }
  /*
   * Determine whether we should enforce idle window for this queue.
   */
  
  static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	enum wl_prio_t prio = cfqq_prio(cfqq);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
1878
  	struct cfq_rb_root *service_tree = cfqq->service_tree;
a6d44e982   Corrado Zoccolo   cfq-iosched: enab...
1879

  	BUG_ON(!service_tree);
  	BUG_ON(!service_tree->count);
  	if (!cfqd->cfq_slice_idle)
  		return false;
  	/* We never do for idle class queues. */
  	if (prio == IDLE_WORKLOAD)
  		return false;
  
  	/* We do for queues that were marked with idle window flag. */
  	if (cfq_cfqq_idle_window(cfqq) &&
  	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
  		return true;
  
  	/*
  	 * Otherwise, we do only if they are the last ones
  	 * in their service tree.
  	 */
  	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
  	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
c1e44756f   Shaohua Li   cfq-iosched: do c...
1899
  		return true;
  	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
  			service_tree->count);
c1e44756f   Shaohua Li   cfq-iosched: do c...
1902
  	return false;
a6d44e982   Corrado Zoccolo   cfq-iosched: enab...
1903
  }
6d048f531   Jens Axboe   cfq-iosched: deve...
1904
  static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507c   Jens Axboe   [PATCH] Update cf...
1905
  {
1792669cc   Jens Axboe   cfq-iosched: don'...
1906
  	struct cfq_queue *cfqq = cfqd->active_queue;
c58698073   Tejun Heo   block, cfq: reorg...
1907
  	struct cfq_io_cq *cic;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1908
  	unsigned long sl, group_idle = 0;
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1909

a68bbddba   Jens Axboe   block: add queue ...
1910
  	/*
  	 * SSD device without seek penalty, disable idling. But only do so
  	 * for devices that support queuing, otherwise we still have a problem
  	 * with sync vs async workloads.
a68bbddba   Jens Axboe   block: add queue ...
1914
  	 */
f7d7b7a7a   Jens Axboe   block: as/cfq ssd...
1915
  	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddba   Jens Axboe   block: add queue ...
1916
  		return;
dd67d0515   Jens Axboe   [PATCH] rbtree: s...
1917
  	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f531   Jens Axboe   cfq-iosched: deve...
1918
  	WARN_ON(cfq_cfqq_slice_new(cfqq));
  
  	/*
  	 * idle is disabled, either manually or by past process history
  	 */
  	if (!cfq_should_idle(cfqd, cfqq)) {
  		/* no queue idling. Check for group idling */
  		if (cfqd->cfq_group_idle)
  			group_idle = cfqd->cfq_group_idle;
  		else
  			return;
  	}
6d048f531   Jens Axboe   cfq-iosched: deve...
1930

22e2c507c   Jens Axboe   [PATCH] Update cf...
1931
  	/*
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
1932
  	 * still active requests from this queue, don't idle
7b679138b   Jens Axboe   cfq-iosched: add ...
1933
  	 */
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
1934
  	if (cfqq->dispatched)
  		return;
  
  	/*
  	 * task has exited, don't wait
  	 */
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1940
  	cic = cfqd->active_cic;
c58698073   Tejun Heo   block, cfq: reorg...
1941
  	if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
6d048f531   Jens Axboe   cfq-iosched: deve...
1942
  		return;
  	/*
  	 * If our average think time is larger than the remaining time
  	 * slice, then don't idle. This avoids overrunning the allotted
  	 * time slice.
  	 */
  	if (sample_valid(cic->ttime.ttime_samples) &&
  	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
fd16d2631   Joe Perches   block: Add __attr...
1950
  		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
383cd7213   Shaohua Li   CFQ: move think t...
1951
  			     cic->ttime.ttime_mean);
355b659c8   Corrado Zoccolo   cfq-iosched: avoi...
1952
  		return;
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
1953
  	}
355b659c8   Corrado Zoccolo   cfq-iosched: avoi...
1954

  	/* There are other queues in the group, don't do group idle */
  	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
  		return;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
1958
  	cfq_mark_cfqq_wait_request(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
1959

  	if (group_idle)
  		sl = cfqd->cfq_group_idle;
  	else
  		sl = cfqd->cfq_slice_idle;
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1964

7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1965
  	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1966
  	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
  	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
  			group_idle ? 1 : 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1969
  }
  /*
   * Move request from internal lists to the request queue dispatch list.
   */
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1973
  static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1974
  {
3ed9a2965   Jens Axboe   cfq-iosched: impr...
1975
  	struct cfq_data *cfqd = q->elevator->elevator_data;
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1976
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
1977

7b679138b   Jens Axboe   cfq-iosched: add ...
1978
  	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
06d218864   Jeff Moyer   cfq: choose a new...
1979
  	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101d   Jens Axboe   [PATCH] cfq-iosch...
1980
  	cfq_remove_request(rq);
6d048f531   Jens Axboe   cfq-iosched: deve...
1981
  	cfqq->dispatched++;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1982
  	(RQ_CFQG(rq))->dispatched++;
5380a101d   Jens Axboe   [PATCH] cfq-iosch...
1983
  	elv_dispatch_sort(q, rq);
3ed9a2965   Jens Axboe   cfq-iosched: impr...
1984

53c583d22   Corrado Zoccolo   cfq-iosched: requ...
1985
  	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893eb   Vivek Goyal   cfq-iosched: blkt...
1986
  	cfqq->nr_sectors += blk_rq_sectors(rq);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1987
  	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
84c124da9   Divyesh Shah   blkio: Changes to...
1988
  					rq_data_dir(rq), rq_is_sync(rq));
  }
  
  /*
   * return expired entry, or NULL to just start from scratch in rbtree
   */
febffd618   Jens Axboe   cfq-iosched: kill...
1994
  static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1995
  {
30996f40b   Jens Axboe   cfq-iosched: fix ...
1996
  	struct request *rq = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1997

3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
1998
  	if (cfq_cfqq_fifo_expire(cfqq))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1999
  		return NULL;
  
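	/* the fifo is only checked once per slice */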
  	cfq_mark_cfqq_fifo_expire(cfqq);
  	if (list_empty(&cfqq->fifo))
  		return NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2004

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2005
  	rq = rq_entry_fifo(cfqq->fifo.next);
30996f40b   Jens Axboe   cfq-iosched: fix ...
2006
  	if (time_before(jiffies, rq_fifo_time(rq)))
7b679138b   Jens Axboe   cfq-iosched: add ...
2007
  		rq = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2008

30996f40b   Jens Axboe   cfq-iosched: fix ...
2009
  	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
6d048f531   Jens Axboe   cfq-iosched: deve...
2010
  	return rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2011
  }
  static inline int
  cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2016

22e2c507c   Jens Axboe   [PATCH] Update cf...
2017
  	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2018

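	/* scale the allowance by priority: a lower ioprio value means more requests */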
  	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2020
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
2021
  /*
   * Must be called with the queue_lock held.
   */
  static int cfqq_process_refs(struct cfq_queue *cfqq)
  {
  	int process_refs, io_refs;
  
  	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
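	/* references not held by allocated requests belong to the process(es) */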
  	process_refs = cfqq->ref - io_refs;
  	BUG_ON(process_refs < 0);
  	return process_refs;
  }
  
  static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
  {
e6c5bc737   Jeff Moyer   cfq: break apart ...
2036
  	int process_refs, new_process_refs;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2037
  	struct cfq_queue *__cfqq;
  	/*
  	 * If there are no process references on the new_cfqq, then it is
  	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
  	 * chain may have dropped their last reference (not just their
  	 * last process reference).
  	 */
  	if (!cfqq_process_refs(new_cfqq))
  		return;
  	/* Avoid a circular list and skip interim queue merges */
  	while ((__cfqq = new_cfqq->new_cfqq)) {
  		if (__cfqq == cfqq)
  			return;
  		new_cfqq = __cfqq;
  	}
  
  	process_refs = cfqq_process_refs(cfqq);
c10b61f09   Jeff Moyer   cfq: Don't allow ...
2054
  	new_process_refs = cfqq_process_refs(new_cfqq);
  	/*
  	 * If the process for the cfqq has gone away, there is no
  	 * sense in merging the queues.
  	 */
c10b61f09   Jeff Moyer   cfq: Don't allow ...
2059
  	if (process_refs == 0 || new_process_refs == 0)
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2060
  		return;
e6c5bc737   Jeff Moyer   cfq: break apart ...
2061
2062
2063
  	/*
  	 * Merge in the direction of the lesser amount of work.
  	 */
e6c5bc737   Jeff Moyer   cfq: break apart ...
2064
2065
  	if (new_process_refs >= process_refs) {
  		cfqq->new_cfqq = new_cfqq;
30d7b9448   Shaohua Li   block cfq: don't ...
2066
  		new_cfqq->ref += process_refs;
e6c5bc737   Jeff Moyer   cfq: break apart ...
2067
2068
  	} else {
  		new_cfqq->new_cfqq = cfqq;
30d7b9448   Shaohua Li   block cfq: don't ...
2069
  		cfqq->ref += new_process_refs;
e6c5bc737   Jeff Moyer   cfq: break apart ...
2070
  	}
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2071
  }

static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}
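
/*
 * Illustrative note (my reading, not part of the original source): the loop
 * above scans every workload type within the chosen priority class and picks
 * the service tree whose head queue has the earliest rb_key, i.e. the type
 * that has waited longest; because time_before() is strict, a tie keeps the
 * type encountered earlier in the scan order.
 */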

static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_prio_t original_prio = cfqd->serving_prio;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = BE_WORKLOAD;
	else {
		cfqd->serving_prio = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_prio != cfqd->serving_prio)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_type =
		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

	if (cfqd->serving_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking the
		 * proportion of queues within the same group will lead to a
		 * higher async ratio system wide, as the root group generally
		 * has a higher weight. A more accurate approach would be to
		 * calculate a system wide async/sync ratio.
		 */
		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}
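
/*
 * Worked example (illustrative figures, not part of the original source):
 * with a 300 ms group slice, 2 queues on the chosen service tree and 6 busy
 * queues in that priority class, the workload slice is 300 * 2 / 6 = 100 ms;
 * for a sync workload it is then clamped to at least 2 * cfq_slice_idle, and
 * in all cases to at least CFQ_MIN_TT.
 */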

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_workload_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used the slice, that means we
		 * have been idling all along on this queue and it should be
		 * ok to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run.  The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree.  If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 **/
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		cfqq->slice_end))
		return true;

	return false;
}
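
/*
 * Worked example (illustrative figures, not part of the original source):
 * with a cfq_slice_idle of 8 ms and 5 requests already dispatched, the queue
 * is assumed to need roughly another 5 * 8 = 40 ms of service; if less than
 * that remains before slice_end, the slice is treated as "used soon" and
 * further dispatch from this queue is throttled in cfq_may_dispatch().
 */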

static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue
		 * we can ignore async queue here and give the sync
		 * queue no dispatch limit. The reason is a sync queue can
		 * preempt async queue, limiting the sync queue doesn't make
		 * sense. This is useful for aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
				!promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice
			 * subjected to upper limit of cfq_quantum.
			 * */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}
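
/*
 * Worked example (illustrative figures, not part of the original source):
 * with the default cfq_quantum of 8, a queue is normally throttled once 4
 * requests are in flight; an async queue whose last delayed sync request
 * finished 300 ms ago, with a 100 ms sync slice, gets a ramped depth of
 * 300 / 100 = 3, so at most 3 async requests may be outstanding at once.
 */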
  
/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_cq *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->icq.ioc->refcount);
		cfqd->active_cic = cic;
	}

	return true;
}
  
/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Each cfq queue took a reference on the parent group. Drop it now.
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_group *cfqg;

	BUG_ON(cfqq->ref <= 0);

	cfqq->ref--;
	if (cfqq->ref)
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	cfqg = cfqq->cfqg;

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	kmem_cache_free(cfq_pool, cfqq);
	cfq_put_cfqg(cfqg);
}

static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}

static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

	cic->ttime.last_end_request = jiffies;
}

static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void changed_ioprio(struct cfq_io_cq *cic)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;

	if (unlikely(!cfqd))
		return;

	cfqq = cic->cfqq[BLK_RW_ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
						GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[BLK_RW_SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);
}

static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);
	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void changed_cgroup(struct cfq_io_cq *cic)
{
	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct request_queue *q;

	if (unlikely(!cfqd))
		return;

	q = cfqd->queue;

	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}
}
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_cq *cic;
	struct cfq_group *cfqg;

retry:
	cfqg = cfq_get_cfqg(cfqd);
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, ioc);
			cfq_link_cfqq_cfqg(cfqq, cfqg);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}
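
/*
 * Illustrative note (my reading, not part of the original source): unlike
 * sync queues, which are per-process, these async queues are shared per
 * device. cfqd keeps one cfq_queue pointer per (class, priority) slot: one
 * row each for RT and BE with an entry per priority level, plus a single
 * idle-class slot, and cfq_get_queue() below reuses whatever queue is
 * already cached there.
 */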

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	cfqq->ref++;
	return cfqq;
}

static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
			cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}
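
/*
 * Worked example (illustrative figures, not part of the original source):
 * the think time tracker is a fixed-point exponential moving average where
 * each new sample enters with weight 256. Starting from samples = 256 and
 * total = 256 * 10 (a 10-jiffy mean), one new 2-jiffy sample gives
 * samples = (7*256 + 256)/8 = 256 and total = (7*2560 + 512)/8 = 2304,
 * so ttime_mean moves from 10 to (2304 + 128)/256 = 9 jiffies.
 */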

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
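
/*
 * Illustrative note (my reading, not part of the original source):
 * seek_history acts as a shift register over recent requests, where each bit
 * records whether a request looked "seeky" (a large positional jump on
 * rotational media, or a small transfer on non-rotational media). The
 * CFQQ_SEEKY() check used elsewhere in this file then classifies the queue
 * by how many of those bits are set.
 */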

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}
  
/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_queue *old_cfqq = cfqd->active_queue;
	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}
  
/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);
	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfq_blkiocg_update_idle_time_stats(
						&cfqq->cfqg->blkg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has expired its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher
		 * priority, or if this new queue is RT and the current one
		 * is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If the active queue doesn't have enough requests and can idle,
	 * cfq might not dispatch sufficient requests to hardware. Don't
	 * zero hw_tag in this case.
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}
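
/*
 * Decide whether it is worth keeping the just-emptied active queue around
 * a little longer ("wait busy") in the hope a new request arrives: only if
 * it is the last queue in its group, the group's think time is not too big,
 * and the slice is used up or the remaining slice is smaller than the
 * task's mean think time.
 */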
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If think time is less than a jiffy, then ttime_mean=0 and the
	 * check above will not be true. It might happen that the slice has
	 * not expired yet but will expire soon (4-5 ns) during select_queue().
	 * To cover the case where think time is less than a jiffy, mark the
	 * queue wait busy if only 1 jiffy is left in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}
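
/*
 * Completion hook (->elevator_completed_req_fn): drop the in-driver and
 * dispatched counters, update think-time and cgroup statistics and, for
 * the active queue, decide whether to wait busy, expire the slice or arm
 * the idle timer.
 */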
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);
	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
			rq_data_dir(rq), rq_is_sync(rq));

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *service_tree;
		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			service_tree = cfqq->service_tree;
		else
			service_tree = service_tree_for(cfqq->cfqg,
				cfqq_prio(cfqq), cfqq_type(cfqq));
		service_tree->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue?
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}
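
/*
 * may_queue path: a task we are currently idling for (wait_request set)
 * must be allowed to allocate a request (ELV_MQUEUE_MUST); everything else
 * gets the default ELV_MQUEUE_MAY answer.
 */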
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->icq.ioc);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfq_put_cfqg(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}
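
/*
 * Switch the cic over to the queue it was scheduled to merge with
 * (cfqq->new_cfqq) and drop our reference to the old queue.
 */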
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);
	spin_lock_irq(q->queue_lock);

	/* handle changed notifications */
	if (unlikely(cic->icq.changed)) {
		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
			changed_ioprio(cic);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
			changed_cgroup(cic);
#endif
	}

new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue.  The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
	spin_unlock_irq(q->queue_lock);
	return 0;
}
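
/*
 * Worker for cfqd->unplug_work: simply re-run the request queue under the
 * queue lock.
 */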
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");
	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}
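
/*
 * Drop the references held on the shared per-priority async queues (and
 * the idle-class one) when the elevator is torn down.
 */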
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}
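
/*
 * ->elevator_exit_fn: expire any active queue, release async queues and
 * groups, stop the idle timer and unplug work, and finally free cfq_data
 * (after an RCU grace period if unlinked groups may still be visible).
 */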
static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;
	bool wait = false;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);
	cfq_release_cfq_groups(cfqd);

	/*
	 * If there are groups which we could not unlink from the blkcg list,
	 * wait for an rcu period for them to be freed.
	 */
	if (cfqd->nr_blkcg_linked_grps)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	/*
	 * Wait for cfqg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other unlinked groups out
	 * there. This can happen if the cgroup deletion path claimed the
	 * responsibility of cleaning up a group before the queue cleanup
	 * code gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* Free up per cpu stats for root group */
	free_percpu(cfqd->root_group.blkg.stats_cpu);
#endif
	kfree(cfqd);
}
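
/*
 * ->elevator_init_fn: allocate cfq_data, set up the root group and its
 * service trees, the fallback oom_cfqq, the idle timer and unplug work,
 * and copy the module-wide tunables into this queue's instance.
 */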
static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	int i, j;
	struct cfq_group *cfqg;
	struct cfq_rb_root *st;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group */
	cfqg = &cfqd->root_group;
	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	/* Give preference to root group over other groups */
	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/*
	 * Set root group reference to 2. One reference will be dropped when
	 * all groups on cfqd->cfqg_list are being deleted during queue exit.
	 * Other reference will remain there as we don't want to delete this
	 * group as it is statically allocated and gets destroyed when
	 * throtl_data goes away.
	 */
	cfqg->ref = 2;

	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
		kfree(cfqg);
		kfree(cfqd);
		return NULL;
	}

	rcu_read_lock();

	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
					(void *)cfqd, 0);
	rcu_read_unlock();
	cfqd->nr_blkcg_linked_grps++;

	/* Add group on cfqd->cfqg_list */
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
#endif
	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in the
	 * last second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return cfqd;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
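
/*
 * For reference, an instantiation such as
 * SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1) expands to
 * roughly:
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *		if (1)
 *			__data = jiffies_to_msecs(__data);
 *		return cfq_var_show(__data, (page));
 *	}
 */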
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
#undef STORE_FUNCTION
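
/*
 * The attributes built below are what the elevator core exposes as this
 * scheduler's sysfs files (typically under /sys/block/<dev>/queue/iosched/),
 * e.g. slice_idle or low_latency; values are converted between milliseconds
 * and jiffies where the __CONV flag was set above.
 */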
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	__ATTR_NULL
};
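
/*
 * Glue into the elevator core: map the generic elevator operations onto
 * the cfq_* handlers defined above and attach the sysfs attributes and the
 * icq sizing used for per-task state.
 */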
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
	.ops = {
		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
	},
	.plid = BLKIO_POLICY_PROP,
};
#else
static struct blkio_policy_type blkio_policy_cfq;
#endif
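
/*
 * Module init/exit: sanitize the jiffies-based defaults for HZ < 1000
 * setups, create the cfq_queue slab cache, then register the elevator and
 * the blkio policy (and undo all of that on exit).
 */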
static int __init cfq_init(void)
{
	int ret;
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#else
	cfq_group_idle = 0;
#endif
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		return -ENOMEM;
	ret = elv_register(&iosched_cfq);
	if (ret) {
		kmem_cache_destroy(cfq_pool);
		return ret;
	}

	blkio_policy_register(&blkio_policy_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	blkio_policy_unregister(&blkio_policy_cfq);
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");