Blame view

block/cfq-iosched.c 104 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
  /*
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2
3
4
5
6
   *  CFQ, or complete fairness queueing, disk scheduler.
   *
   *  Based on ideas from a previously unfinished io
   *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   *
0fe234795   Jens Axboe   [PATCH] Update ax...
7
   *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
8
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
9
  #include <linux/module.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
10
  #include <linux/slab.h>
1cc9be68e   Al Viro   [PATCH] noise rem...
11
12
  #include <linux/blkdev.h>
  #include <linux/elevator.h>
ad5ebd2fa   Randy Dunlap   block: jiffies fixes
13
  #include <linux/jiffies.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
14
  #include <linux/rbtree.h>
22e2c507c   Jens Axboe   [PATCH] Update cf...
15
  #include <linux/ioprio.h>
7b679138b   Jens Axboe   cfq-iosched: add ...
16
  #include <linux/blktrace_api.h>
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
17
  #include "cfq.h"
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
18
19
20
21
  
  /*
   * tunables
   */
fe094d98e   Jens Axboe   cfq-iosched: make...
22
  /* max queue in one round of service */
abc3c744d   Shaohua Li   cfq-iosched: quan...
23
  static const int cfq_quantum = 8;
64100099e   Arjan van de Ven   [BLOCK] mark some...
24
  static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
fe094d98e   Jens Axboe   cfq-iosched: make...
25
26
27
28
  /* maximum backwards seek, in KiB */
  static const int cfq_back_max = 16 * 1024;
  /* penalty of a backwards seek */
  static const int cfq_back_penalty = 2;
64100099e   Arjan van de Ven   [BLOCK] mark some...
29
  static const int cfq_slice_sync = HZ / 10;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
30
  static int cfq_slice_async = HZ / 25;
64100099e   Arjan van de Ven   [BLOCK] mark some...
31
  static const int cfq_slice_async_rq = 2;
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
32
  static int cfq_slice_idle = HZ / 125;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
33
  static int cfq_group_idle = HZ / 125;
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
34
35
  static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  static const int cfq_hist_divisor = 4;
22e2c507c   Jens Axboe   [PATCH] Update cf...
36

d9e7620e6   Jens Axboe   cfq-iosched: rewo...
37
  /*
0871714e0   Jens Axboe   cfq-iosched: rela...
38
   * offset from end of service tree
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
39
   */
0871714e0   Jens Axboe   cfq-iosched: rela...
40
  #define CFQ_IDLE_DELAY		(HZ / 5)
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
41
42
43
44
45
  
  /*
   * below this threshold, we consider thinktime immediate
   */
  #define CFQ_MIN_TT		(2)
22e2c507c   Jens Axboe   [PATCH] Update cf...
46
  #define CFQ_SLICE_SCALE		(5)
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
47
  #define CFQ_HW_QUEUE_MIN	(5)
25bc6b077   Vivek Goyal   blkio: Introduce ...
48
  #define CFQ_SERVICE_SHIFT       12
22e2c507c   Jens Axboe   [PATCH] Update cf...
49

3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
50
  #define CFQQ_SEEK_THR		(sector_t)(8 * 100)
e9ce335df   Shaohua Li   cfq-iosched: fix ...
51
  #define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
41647e7a9   Corrado Zoccolo   cfq-iosched: reth...
52
  #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
53
  #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
ae54abed6   Shaohua Li   cfq-iosched: spli...
54

fe094d98e   Jens Axboe   cfq-iosched: make...
55
56
  #define RQ_CIC(rq)		\
  	((struct cfq_io_context *) (rq)->elevator_private)
7b679138b   Jens Axboe   cfq-iosched: add ...
57
  #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
58
  #define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elevator_private3)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
59

e18b890bb   Christoph Lameter   [PATCH] slab: rem...
60
61
  static struct kmem_cache *cfq_pool;
  static struct kmem_cache *cfq_ioc_pool;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
62

245b2e70e   Tejun Heo   percpu: clean up ...
63
  static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
334e94de9   Al Viro   [PATCH] deal with...
64
  static struct completion *ioc_gone;
9a11b4ed0   Jens Axboe   cfq-iosched: prop...
65
  static DEFINE_SPINLOCK(ioc_gone_lock);
334e94de9   Al Viro   [PATCH] deal with...
66

80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
67
68
  static DEFINE_SPINLOCK(cic_index_lock);
  static DEFINE_IDA(cic_index_ida);
22e2c507c   Jens Axboe   [PATCH] Update cf...
69
70
  #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
  #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
22e2c507c   Jens Axboe   [PATCH] Update cf...
71
  #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
72
  #define sample_valid(samples)	((samples) > 80)
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
73
  #define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
74

22e2c507c   Jens Axboe   [PATCH] Update cf...
75
  /*
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
76
77
78
79
80
81
82
83
   * Most of our rbtree usage is for sorting with min extraction, so
   * if we cache the leftmost node we don't have to walk down the tree
   * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
   * move this into the elevator for the rq sorting as well.
   */
  struct cfq_rb_root {
  	struct rb_root rb;
  	struct rb_node *left;
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
84
  	unsigned count;
73e9ffdd0   Richard Kennedy   cfq: remove 8 byt...
85
  	unsigned total_weight;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
86
  	u64 min_vdisktime;
25bc6b077   Vivek Goyal   blkio: Introduce ...
87
  	struct rb_node *active;
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
88
  };
73e9ffdd0   Richard Kennedy   cfq: remove 8 byt...
89
90
  #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
  			.count = 0, .min_vdisktime = 0, }
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
91
92
  
  /*
6118b70b3   Jens Axboe   cfq-iosched: get ...
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
   * Per process-grouping structure
   */
  struct cfq_queue {
  	/* reference count */
  	atomic_t ref;
  	/* various state flags, see below */
  	unsigned int flags;
  	/* parent cfq_data */
  	struct cfq_data *cfqd;
  	/* service_tree member */
  	struct rb_node rb_node;
  	/* service_tree key */
  	unsigned long rb_key;
  	/* prio tree member */
  	struct rb_node p_node;
  	/* prio tree root we belong to, if any */
  	struct rb_root *p_root;
  	/* sorted list of pending requests */
  	struct rb_root sort_list;
  	/* if fifo isn't expired, next request to serve */
  	struct request *next_rq;
  	/* requests queued in sort_list */
  	int queued[2];
  	/* currently allocated requests */
  	int allocated[2];
  	/* fifo list of requests in sort_list */
  	struct list_head fifo;
dae739ebc   Vivek Goyal   blkio: Group time...
120
121
  	/* time when queue got scheduled in to dispatch first request. */
  	unsigned long dispatch_start;
f75edf2dc   Vivek Goyal   blkio: Wait for c...
122
  	unsigned int allocated_slice;
c4081ba5c   Richard Kennedy   cfq: reorder cfq_...
123
  	unsigned int slice_dispatch;
dae739ebc   Vivek Goyal   blkio: Group time...
124
125
  	/* time when first request from queue completed and slice started. */
  	unsigned long slice_start;
6118b70b3   Jens Axboe   cfq-iosched: get ...
126
127
  	unsigned long slice_end;
  	long slice_resid;
6118b70b3   Jens Axboe   cfq-iosched: get ...
128
129
130
131
132
133
134
135
136
  
  	/* pending metadata requests */
  	int meta_pending;
  	/* number of requests that are on the dispatch list or inside driver */
  	int dispatched;
  
  	/* io prio of this group */
  	unsigned short ioprio, org_ioprio;
  	unsigned short ioprio_class, org_ioprio_class;
c4081ba5c   Richard Kennedy   cfq: reorder cfq_...
137
  	pid_t pid;
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
138
  	u32 seek_history;
b2c18e1e0   Jeff Moyer   cfq: calculate th...
139
  	sector_t last_request_pos;
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
140
  	struct cfq_rb_root *service_tree;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
141
  	struct cfq_queue *new_cfqq;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
142
  	struct cfq_group *cfqg;
ae30c2865   Vivek Goyal   blkio: Implement ...
143
  	struct cfq_group *orig_cfqg;
c4e7893eb   Vivek Goyal   cfq-iosched: blkt...
144
145
  	/* Number of sectors dispatched from queue in single dispatch round */
  	unsigned long nr_sectors;
6118b70b3   Jens Axboe   cfq-iosched: get ...
146
147
148
  };
  
  /*
718eee057   Corrado Zoccolo   cfq-iosched: fair...
149
   * First index in the service_trees.
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
150
151
152
   * IDLE is handled separately, so it has negative index
   */
  enum wl_prio_t {
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
153
  	BE_WORKLOAD = 0,
615f0259e   Vivek Goyal   blkio: Implement ...
154
155
  	RT_WORKLOAD = 1,
  	IDLE_WORKLOAD = 2,
b4627321e   Vivek Goyal   cfq-iosched: Fix ...
156
  	CFQ_PRIO_NR,
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
157
158
159
  };
  
  /*
718eee057   Corrado Zoccolo   cfq-iosched: fair...
160
161
162
163
164
165
166
   * Second index in the service_trees.
   */
  enum wl_type_t {
  	ASYNC_WORKLOAD = 0,		/* queue is not sync (async writes) */
  	SYNC_NOIDLE_WORKLOAD = 1,	/* sync queue without idle window */
  	SYNC_WORKLOAD = 2		/* sync queue with idle window enabled */
  };
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
167
168
  /* This is per cgroup per device grouping structure */
  struct cfq_group {
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
169
170
171
172
173
  	/* group service_tree member */
  	struct rb_node rb_node;
  
  	/* group service_tree key */
  	u64 vdisktime;
25bc6b077   Vivek Goyal   blkio: Introduce ...
174
  	unsigned int weight;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
175
176
177
178
  	bool on_st;
  
  	/* number of cfqq currently on this group */
  	int nr_cfqq;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
179
  	/*
b4627321e   Vivek Goyal   cfq-iosched: Fix ...
180
181
182
183
184
185
186
187
188
189
190
191
  	 * Per group busy queues average. Useful for workload slice calc. We
  	 * create the array for each prio class but at run time it is used
  	 * only for RT and BE class and slot for IDLE class remains unused.
  	 * This is primarily done to avoid confusion and a gcc warning.
  	 */
  	unsigned int busy_queues_avg[CFQ_PRIO_NR];
  	/*
  	 * rr lists of queues with requests. We maintain service trees for
  	 * RT and BE classes. These trees are subdivided in subclasses
  	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
  	 * class there is no subclassification and all the cfq queues go on
  	 * a single tree service_tree_idle.
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
192
193
194
195
  	 * Counts are embedded in the cfq_rb_root
  	 */
  	struct cfq_rb_root service_trees[2][3];
  	struct cfq_rb_root service_tree_idle;
dae739ebc   Vivek Goyal   blkio: Group time...
196
197
198
199
  
  	unsigned long saved_workload_slice;
  	enum wl_type_t saved_workload;
  	enum wl_prio_t saved_serving_prio;
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
200
201
202
  	struct blkio_group blkg;
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	struct hlist_node cfqd_node;
b1c357696   Vivek Goyal   blkio: Take care ...
203
  	atomic_t ref;
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
204
  #endif
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
205
206
  	/* number of requests that are on the dispatch list or inside driver */
  	int dispatched;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
207
  };
718eee057   Corrado Zoccolo   cfq-iosched: fair...
208
209
  
  /*
22e2c507c   Jens Axboe   [PATCH] Update cf...
210
211
   * Per block device queue structure
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
212
  struct cfq_data {
165125e1e   Jens Axboe   [BLOCK] Get rid o...
213
  	struct request_queue *queue;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
214
215
  	/* Root service tree for cfq_groups */
  	struct cfq_rb_root grp_service_tree;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
216
  	struct cfq_group root_group;
22e2c507c   Jens Axboe   [PATCH] Update cf...
217
218
  
  	/*
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
219
  	 * The priority currently being served
22e2c507c   Jens Axboe   [PATCH] Update cf...
220
  	 */
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
221
  	enum wl_prio_t serving_prio;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
222
223
  	enum wl_type_t serving_type;
  	unsigned long workload_expires;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
224
  	struct cfq_group *serving_group;
a36e71f99   Jens Axboe   cfq-iosched: add ...
225
226
227
228
229
230
231
  
  	/*
  	 * Each priority tree is sorted by next_request position.  These
  	 * trees are used when determining if two or more queues are
  	 * interleaving requests (see cfq_close_cooperator).
  	 */
  	struct rb_root prio_trees[CFQ_PRIO_LISTS];
22e2c507c   Jens Axboe   [PATCH] Update cf...
232
  	unsigned int busy_queues;
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
233
234
  	int rq_in_driver;
  	int rq_in_flight[2];
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
235
236
237
238
239
  
  	/*
  	 * queue-depth detection
  	 */
  	int rq_queued;
25776e359   Jens Axboe   [PATCH] cfq-iosch...
240
  	int hw_tag;
e459dd08f   Corrado Zoccolo   cfq-iosched: fix ...
241
242
243
244
245
246
247
248
  	/*
  	 * hw_tag can be
  	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
  	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
  	 *  0 => no NCQ
  	 */
  	int hw_tag_est_depth;
  	unsigned int hw_tag_samples;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
249

22e2c507c   Jens Axboe   [PATCH] Update cf...
250
  	/*
22e2c507c   Jens Axboe   [PATCH] Update cf...
251
252
253
  	 * idle window management
  	 */
  	struct timer_list idle_slice_timer;
23e018a1b   Jens Axboe   block: get rid of...
254
  	struct work_struct unplug_work;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
255

22e2c507c   Jens Axboe   [PATCH] Update cf...
256
257
  	struct cfq_queue *active_queue;
  	struct cfq_io_context *active_cic;
22e2c507c   Jens Axboe   [PATCH] Update cf...
258

c2dea2d1f   Vasily Tarasov   cfq: async queue ...
259
260
261
262
263
  	/*
  	 * async queue for each priority case
  	 */
  	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
  	struct cfq_queue *async_idle_cfqq;
15c31be4d   Jens Axboe   cfq-iosched: fix ...
264

6d048f531   Jens Axboe   cfq-iosched: deve...
265
  	sector_t last_position;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
266

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
267
268
269
270
  	/*
  	 * tunables, see top of file
  	 */
  	unsigned int cfq_quantum;
22e2c507c   Jens Axboe   [PATCH] Update cf...
271
  	unsigned int cfq_fifo_expire[2];
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
272
273
  	unsigned int cfq_back_penalty;
  	unsigned int cfq_back_max;
22e2c507c   Jens Axboe   [PATCH] Update cf...
274
275
276
  	unsigned int cfq_slice[2];
  	unsigned int cfq_slice_async_rq;
  	unsigned int cfq_slice_idle;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
277
  	unsigned int cfq_group_idle;
963b72fc6   Jens Axboe   cfq-iosched: rena...
278
  	unsigned int cfq_latency;
ae30c2865   Vivek Goyal   blkio: Implement ...
279
  	unsigned int cfq_group_isolation;
d9ff41879   Al Viro   [PATCH] make cfq_...
280

80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
281
  	unsigned int cic_index;
d9ff41879   Al Viro   [PATCH] make cfq_...
282
  	struct list_head cic_list;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
283

6118b70b3   Jens Axboe   cfq-iosched: get ...
284
285
286
287
  	/*
  	 * Fallback dummy cfqq for extreme OOM conditions
  	 */
  	struct cfq_queue oom_cfqq;
365722bb9   Vivek Goyal   cfq-iosched: dela...
288

573412b29   Corrado Zoccolo   cfq-iosched: redu...
289
  	unsigned long last_delayed_sync;
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
290
291
292
  
  	/* List of cfq groups being managed on this device*/
  	struct hlist_head cfqg_list;
bb729bc98   Jens Axboe   cfq-iosched: use ...
293
  	struct rcu_head rcu;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
294
  };
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
295
  static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
296
297
  static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
  					    enum wl_prio_t prio,
65b32a573   Vivek Goyal   cfq-iosched: Remo...
298
  					    enum wl_type_t type)
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
299
  {
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
300
301
  	if (!cfqg)
  		return NULL;
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
302
  	if (prio == IDLE_WORKLOAD)
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
303
  		return &cfqg->service_tree_idle;
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
304

cdb16e8f7   Vivek Goyal   blkio: Introduce ...
305
  	return &cfqg->service_trees[prio][type];
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
306
  }
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
307
  enum cfqq_state_flags {
b0b8d7494   Jens Axboe   cfq-iosched: docu...
308
309
  	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
  	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
b029195dd   Jens Axboe   cfq-iosched: don'...
310
  	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
b0b8d7494   Jens Axboe   cfq-iosched: docu...
311
  	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
b0b8d7494   Jens Axboe   cfq-iosched: docu...
312
313
314
  	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
  	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
  	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
44f7c1606   Jens Axboe   cfq-iosched: defe...
315
  	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
91fac317a   Vasily Tarasov   cfq-iosched: get ...
316
  	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
b3b6d0408   Jeff Moyer   cfq: change the m...
317
  	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
ae54abed6   Shaohua Li   cfq-iosched: spli...
318
  	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
76280aff1   Corrado Zoccolo   cfq-iosched: idli...
319
  	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
f75edf2dc   Vivek Goyal   blkio: Wait for c...
320
  	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
321
322
323
324
325
  };
  
  #define CFQ_CFQQ_FNS(name)						\
  static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
  {									\
fe094d98e   Jens Axboe   cfq-iosched: make...
326
  	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
327
328
329
  }									\
  static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
  {									\
fe094d98e   Jens Axboe   cfq-iosched: make...
330
  	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
331
332
333
  }									\
  static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
  {									\
fe094d98e   Jens Axboe   cfq-iosched: make...
334
  	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
335
336
337
338
  }
  
  CFQ_CFQQ_FNS(on_rr);
  CFQ_CFQQ_FNS(wait_request);
b029195dd   Jens Axboe   cfq-iosched: don'...
339
  CFQ_CFQQ_FNS(must_dispatch);
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
340
  CFQ_CFQQ_FNS(must_alloc_slice);
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
341
342
343
  CFQ_CFQQ_FNS(fifo_expire);
  CFQ_CFQQ_FNS(idle_window);
  CFQ_CFQQ_FNS(prio_changed);
44f7c1606   Jens Axboe   cfq-iosched: defe...
344
  CFQ_CFQQ_FNS(slice_new);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
345
  CFQ_CFQQ_FNS(sync);
a36e71f99   Jens Axboe   cfq-iosched: add ...
346
  CFQ_CFQQ_FNS(coop);
ae54abed6   Shaohua Li   cfq-iosched: spli...
347
  CFQ_CFQQ_FNS(split_coop);
76280aff1   Corrado Zoccolo   cfq-iosched: idli...
348
  CFQ_CFQQ_FNS(deep);
f75edf2dc   Vivek Goyal   blkio: Wait for c...
349
  CFQ_CFQQ_FNS(wait_busy);
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
350
  #undef CFQ_CFQQ_FNS
afc24d49c   Vivek Goyal   blk-cgroup: confi...
351
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
2868ef7b3   Vivek Goyal   blkio: Some debug...
352
353
354
355
356
357
358
359
360
361
  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
  			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
  			blkg_path(&(cfqq)->cfqg->blkg), ##args);
  
  #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
  	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
  				blkg_path(&(cfqg)->blkg), ##args);      \
  
  #else
7b679138b   Jens Axboe   cfq-iosched: add ...
362
363
  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
2868ef7b3   Vivek Goyal   blkio: Some debug...
364
365
  #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0);
  #endif
7b679138b   Jens Axboe   cfq-iosched: add ...
366
367
  #define cfq_log(cfqd, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
615f0259e   Vivek Goyal   blkio: Implement ...
368
369
370
371
372
373
374
375
376
  /*
   * Traverses through cfq group service trees.
   * Iterates (i, j) over every service tree of @cfqg, pointing @st at the
   * current tree: for the RT and BE classes (i < IDLE_WORKLOAD) it visits
   * the three per-workload-type subtrees service_trees[i][j]; for the IDLE
   * class it visits the single service_tree_idle exactly once (j == 0).
   */
  #define for_each_cfqg_st(cfqg, i, j, st) \
  	for (i = 0; i <= IDLE_WORKLOAD; i++) \
  		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
  			: &cfqg->service_tree_idle; \
  			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
  			(i == IDLE_WORKLOAD && j == 0); \
  			j++, st = i < IDLE_WORKLOAD ? \
  			&cfqg->service_trees[i][j]: NULL) \
02b35081f   Vivek Goyal   cfq-iosched: Do g...
377
378
379
380
381
382
383
384
385
386
387
388
389
390
  static inline bool iops_mode(struct cfq_data *cfqd)
  {
  	/*
  	 * If we are not idling on queues and it is a NCQ drive, parallel
  	 * execution of requests is on and measuring time is not possible
  	 * in most of the cases until and unless we drive shallower queue
  	 * depths and that becomes a performance bottleneck. In such cases
  	 * switch to start providing fairness in terms of number of IOs.
  	 */
  	return !cfqd->cfq_slice_idle && cfqd->hw_tag;
  }
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
391
392
393
394
395
396
397
398
  static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
  {
  	/* Map the queue's ioprio class onto a workload priority index. */
  	return cfq_class_idle(cfqq) ? IDLE_WORKLOAD :
  	       cfq_class_rt(cfqq)   ? RT_WORKLOAD : BE_WORKLOAD;
  }
718eee057   Corrado Zoccolo   cfq-iosched: fair...
399
400
401
402
403
404
405
406
407
  
  static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
  {
  	/* Async queues first; sync queues split by whether idling applies. */
  	if (cfq_cfqq_sync(cfqq))
  		return cfq_cfqq_idle_window(cfqq) ? SYNC_WORKLOAD
  						  : SYNC_NOIDLE_WORKLOAD;
  	return ASYNC_WORKLOAD;
  }
58ff82f34   Vivek Goyal   blkio: Implement ...
408
409
410
  static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
  					struct cfq_data *cfqd,
  					struct cfq_group *cfqg)
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
411
412
  {
  	if (wl == IDLE_WORKLOAD)
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
413
  		return cfqg->service_tree_idle.count;
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
414

cdb16e8f7   Vivek Goyal   blkio: Introduce ...
415
416
417
  	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
  		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
  		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
c0324a020   Corrado Zoccolo   cfq-iosched: reim...
418
  }
f26bd1f0a   Vivek Goyal   blkio: Determine ...
419
420
421
422
423
424
  static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
  					struct cfq_group *cfqg)
  {
  	int busy = 0;
  
  	/* Async queues only ever sit on the RT and BE service trees. */
  	busy += cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count;
  	busy += cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
  	return busy;
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
425
  static void cfq_dispatch_insert(struct request_queue *, struct request *);
a6151c3a5   Jens Axboe   cfq-iosched: appl...
426
  static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
fd0928df9   Jens Axboe   ioprio: move io p...
427
  				       struct io_context *, gfp_t);
4ac845a2e   Jens Axboe   block: cfq: make ...
428
  static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
91fac317a   Vasily Tarasov   cfq-iosched: get ...
429
430
431
  						struct io_context *);
  
  static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
a6151c3a5   Jens Axboe   cfq-iosched: appl...
432
  					    bool is_sync)
91fac317a   Vasily Tarasov   cfq-iosched: get ...
433
  {
a6151c3a5   Jens Axboe   cfq-iosched: appl...
434
  	return cic->cfqq[is_sync];
91fac317a   Vasily Tarasov   cfq-iosched: get ...
435
436
437
  }
  
  static inline void cic_set_cfqq(struct cfq_io_context *cic,
a6151c3a5   Jens Axboe   cfq-iosched: appl...
438
  				struct cfq_queue *cfqq, bool is_sync)
91fac317a   Vasily Tarasov   cfq-iosched: get ...
439
  {
a6151c3a5   Jens Axboe   cfq-iosched: appl...
440
  	cic->cfqq[is_sync] = cfqq;
91fac317a   Vasily Tarasov   cfq-iosched: get ...
441
  }
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
442
  #define CIC_DEAD_KEY	1ul
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
443
  #define CIC_DEAD_INDEX_SHIFT	1
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
444
445
446
  
  static inline void *cfqd_dead_key(struct cfq_data *cfqd)
  {
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
447
  	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
448
449
450
451
452
453
454
455
456
457
458
  }
  
  static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  {
  	struct cfq_data *cfqd = cic->key;
  
  	/* A key with CIC_DEAD_KEY set in its low bit marks a dead cfqd. */
  	return unlikely((unsigned long) cfqd & CIC_DEAD_KEY) ? NULL : cfqd;
  }
91fac317a   Vasily Tarasov   cfq-iosched: get ...
459
460
461
462
  /*
   * We regard a request as SYNC, if it's either a read or has the SYNC bit
   * set (in which case it could also be direct WRITE).
   */
a6151c3a5   Jens Axboe   cfq-iosched: appl...
463
  static inline bool cfq_bio_sync(struct bio *bio)
91fac317a   Vasily Tarasov   cfq-iosched: get ...
464
  {
7b6d91dae   Christoph Hellwig   block: unify flag...
465
  	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
466
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
467

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
468
  /*
99f95e528   Andrew Morton   [PATCH] cfq build...
469
470
471
   * scheduler run of queue, if there are requests pending and no one in the
   * driver that will restart queueing
   */
23e018a1b   Jens Axboe   block: get rid of...
472
  static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
99f95e528   Andrew Morton   [PATCH] cfq build...
473
  {
7b679138b   Jens Axboe   cfq-iosched: add ...
474
475
  	if (cfqd->busy_queues) {
  		cfq_log(cfqd, "schedule dispatch");
23e018a1b   Jens Axboe   block: get rid of...
476
  		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
7b679138b   Jens Axboe   cfq-iosched: add ...
477
  	}
99f95e528   Andrew Morton   [PATCH] cfq build...
478
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
479
  static int cfq_queue_empty(struct request_queue *q)
99f95e528   Andrew Morton   [PATCH] cfq build...
480
481
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
f04a64246   Vivek Goyal   blkio: Keep queue...
482
  	return !cfqd->rq_queued;
99f95e528   Andrew Morton   [PATCH] cfq build...
483
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
484
  /*
44f7c1606   Jens Axboe   cfq-iosched: defe...
485
486
487
488
   * Scale schedule slice based on io priority. Use the sync time slice only
   * if a queue is marked sync and has sync io queued. A sync queue with async
   * io only, should not get full sync slice length.
   */
a6151c3a5   Jens Axboe   cfq-iosched: appl...
489
  static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
490
  				 unsigned short prio)
44f7c1606   Jens Axboe   cfq-iosched: defe...
491
  {
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
492
  	const int base_slice = cfqd->cfq_slice[sync];
44f7c1606   Jens Axboe   cfq-iosched: defe...
493

d9e7620e6   Jens Axboe   cfq-iosched: rewo...
494
495
496
497
  	WARN_ON(prio >= IOPRIO_BE_NR);
  
  	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
  }
44f7c1606   Jens Axboe   cfq-iosched: defe...
498

d9e7620e6   Jens Axboe   cfq-iosched: rewo...
499
500
501
502
  static inline int
  cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
44f7c1606   Jens Axboe   cfq-iosched: defe...
503
  }
25bc6b077   Vivek Goyal   blkio: Introduce ...
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
  static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
  {
  	u64 charge = delta << CFQ_SERVICE_SHIFT;
  
  	/* Normalize by weight: heavier groups accrue vdisktime more slowly. */
  	charge = charge * BLKIO_WEIGHT_DEFAULT;
  	do_div(charge, cfqg->weight);
  	return charge;
  }
  
  static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
  {
  	/* Signed difference keeps the comparison correct across u64 wrap. */
  	if ((s64)(vdisktime - min_vdisktime) > 0)
  		return vdisktime;
  	return min_vdisktime;
  }
  
  static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
  {
  	/* Signed difference keeps the comparison correct across u64 wrap. */
  	if ((s64)(vdisktime - min_vdisktime) < 0)
  		return vdisktime;
  	return min_vdisktime;
  }
  
  static void update_min_vdisktime(struct cfq_rb_root *st)
  {
  	u64 candidate = st->min_vdisktime;
  
  	/* The currently active group, if any, provides a candidate. */
  	if (st->active)
  		candidate = rb_entry_cfqg(st->active)->vdisktime;
  
  	/* The cached leftmost node holds the tree's smallest vdisktime. */
  	if (st->left)
  		candidate = min_vdisktime(candidate,
  					  rb_entry_cfqg(st->left)->vdisktime);
  
  	/* min_vdisktime may only move forward, never backward. */
  	st->min_vdisktime = max_vdisktime(st->min_vdisktime, candidate);
  }
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
548
549
550
551
552
  /*
   * get averaged number of queues of RT/BE priority.
   * average is updated, with a formula that gives more weight to higher numbers,
   * to quickly follows sudden increases and decrease slowly
   */
58ff82f34   Vivek Goyal   blkio: Implement ...
553
554
  static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
  					struct cfq_group *cfqg, bool rt)
5869619cb   Jens Axboe   cfq-iosched: fix ...
555
  {
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
556
557
558
  	unsigned min_q, max_q;
  	unsigned mult  = cfq_hist_divisor - 1;
  	unsigned round = cfq_hist_divisor / 2;
58ff82f34   Vivek Goyal   blkio: Implement ...
559
  	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
560

58ff82f34   Vivek Goyal   blkio: Implement ...
561
562
563
  	min_q = min(cfqg->busy_queues_avg[rt], busy);
  	max_q = max(cfqg->busy_queues_avg[rt], busy);
  	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
564
  		cfq_hist_divisor;
58ff82f34   Vivek Goyal   blkio: Implement ...
565
566
567
568
569
570
571
572
573
  	return cfqg->busy_queues_avg[rt];
  }
  
  static inline unsigned
  cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  
  	return cfq_target_latency * cfqg->weight / st->total_weight;
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
574
  }
44f7c1606   Jens Axboe   cfq-iosched: defe...
575
576
577
  static inline void
  cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
578
579
  	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
  	if (cfqd->cfq_latency) {
58ff82f34   Vivek Goyal   blkio: Implement ...
580
581
582
583
584
585
  		/*
  		 * interested queues (we consider only the ones with the same
  		 * priority class in the cfq group)
  		 */
  		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
  						cfq_class_rt(cfqq));
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
586
587
  		unsigned sync_slice = cfqd->cfq_slice[1];
  		unsigned expect_latency = sync_slice * iq;
58ff82f34   Vivek Goyal   blkio: Implement ...
588
589
590
  		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
  
  		if (expect_latency > group_slice) {
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
591
592
593
594
595
596
597
  			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
  			/* scale low_slice according to IO priority
  			 * and sync vs async */
  			unsigned low_slice =
  				min(slice, base_low_slice * slice / sync_slice);
  			/* the adapted slice value is scaled to fit all iqs
  			 * into the target latency */
58ff82f34   Vivek Goyal   blkio: Implement ...
598
  			slice = max(slice * group_slice / expect_latency,
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
599
600
601
  				    low_slice);
  		}
  	}
dae739ebc   Vivek Goyal   blkio: Group time...
602
  	cfqq->slice_start = jiffies;
5db5d6427   Corrado Zoccolo   cfq-iosched: adap...
603
  	cfqq->slice_end = jiffies + slice;
f75edf2dc   Vivek Goyal   blkio: Wait for c...
604
  	cfqq->allocated_slice = slice;
7b679138b   Jens Axboe   cfq-iosched: add ...
605
  	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
44f7c1606   Jens Axboe   cfq-iosched: defe...
606
607
608
609
610
611
612
  }
  
  /*
   * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
   * isn't valid until the first request from the dispatch is activated
   * and the slice time set.
   */
a6151c3a5   Jens Axboe   cfq-iosched: appl...
613
  static inline bool cfq_slice_used(struct cfq_queue *cfqq)
44f7c1606   Jens Axboe   cfq-iosched: defe...
614
615
616
617
618
619
620
621
622
623
  {
  	if (cfq_cfqq_slice_new(cfqq))
  		return 0;
  	if (time_before(jiffies, cfqq->slice_end))
  		return 0;
  
  	return 1;
  }
  
  /*
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
624
   * Lifted from AS - choose which of rq1 and rq2 that is best served now.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
625
   * We choose the request that is closest to the head right now. Distance
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
626
   * behind the head is penalized and only allowed to a certain extent.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
627
   */
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
628
  static struct request *
cf7c25cf9   Corrado Zoccolo   cfq-iosched: fix ...
629
  cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
630
  {
cf7c25cf9   Corrado Zoccolo   cfq-iosched: fix ...
631
  	sector_t s1, s2, d1 = 0, d2 = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
632
  	unsigned long back_max;
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
633
634
635
  #define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
  #define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
  	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
636

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
637
638
639
640
  	if (rq1 == NULL || rq1 == rq2)
  		return rq2;
  	if (rq2 == NULL)
  		return rq1;
9c2c38a12   Jens Axboe   [PATCH] cfq-iosch...
641

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
642
643
644
645
  	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
  		return rq1;
  	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
  		return rq2;
7b6d91dae   Christoph Hellwig   block: unify flag...
646
  	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
647
  		return rq1;
7b6d91dae   Christoph Hellwig   block: unify flag...
648
649
  	else if ((rq2->cmd_flags & REQ_META) &&
  		 !(rq1->cmd_flags & REQ_META))
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
650
  		return rq2;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
651

83096ebf1   Tejun Heo   block: convert to...
652
653
  	s1 = blk_rq_pos(rq1);
  	s2 = blk_rq_pos(rq2);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
654

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
  	/*
  	 * by definition, 1KiB is 2 sectors
  	 */
  	back_max = cfqd->cfq_back_max * 2;
  
  	/*
  	 * Strict one way elevator _except_ in the case where we allow
  	 * short backward seeks which are biased as twice the cost of a
  	 * similar forward seek.
  	 */
  	if (s1 >= last)
  		d1 = s1 - last;
  	else if (s1 + back_max >= last)
  		d1 = (last - s1) * cfqd->cfq_back_penalty;
  	else
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
670
  		wrap |= CFQ_RQ1_WRAP;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
671
672
673
674
675
676
  
  	if (s2 >= last)
  		d2 = s2 - last;
  	else if (s2 + back_max >= last)
  		d2 = (last - s2) * cfqd->cfq_back_penalty;
  	else
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
677
  		wrap |= CFQ_RQ2_WRAP;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
678
679
  
  	/* Found required data */
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
680
681
682
683
684
685
  
  	/*
  	 * By doing switch() on the bit mask "wrap" we avoid having to
  	 * check two variables for all permutations: --> faster!
  	 */
  	switch (wrap) {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
686
  	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
687
  		if (d1 < d2)
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
688
  			return rq1;
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
689
  		else if (d2 < d1)
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
690
  			return rq2;
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
691
692
  		else {
  			if (s1 >= s2)
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
693
  				return rq1;
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
694
  			else
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
695
  				return rq2;
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
696
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
697

e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
698
  	case CFQ_RQ2_WRAP:
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
699
  		return rq1;
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
700
  	case CFQ_RQ1_WRAP:
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
701
702
  		return rq2;
  	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
e8a99053e   Andreas Mohr   [PATCH] cfq-iosch...
703
704
705
706
707
708
709
710
  	default:
  		/*
  		 * Since both rqs are wrapped,
  		 * start with the one that's further behind head
  		 * (--> only *one* back seek required),
  		 * since back seek takes more time than forward.
  		 */
  		if (s1 <= s2)
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
711
  			return rq1;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
712
  		else
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
713
  			return rq2;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
714
715
  	}
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
716
717
718
  /*
   * The below is leftmost cache rbtree addon
   */
0871714e0   Jens Axboe   cfq-iosched: rela...
719
  static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
720
  {
615f0259e   Vivek Goyal   blkio: Implement ...
721
722
723
  	/* Service tree is empty */
  	if (!root->count)
  		return NULL;
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
724
725
  	if (!root->left)
  		root->left = rb_first(&root->rb);
0871714e0   Jens Axboe   cfq-iosched: rela...
726
727
728
729
  	if (root->left)
  		return rb_entry(root->left, struct cfq_queue, rb_node);
  
  	return NULL;
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
730
  }
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
731
732
733
734
735
736
737
738
739
740
  static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
  {
  	if (!root->left)
  		root->left = rb_first(&root->rb);
  
  	if (root->left)
  		return rb_entry_cfqg(root->left);
  
  	return NULL;
  }
a36e71f99   Jens Axboe   cfq-iosched: add ...
741
742
743
744
745
  static void rb_erase_init(struct rb_node *n, struct rb_root *root)
  {
  	rb_erase(n, root);
  	RB_CLEAR_NODE(n);
  }
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
746
747
748
749
  static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
  {
  	if (root->left == n)
  		root->left = NULL;
a36e71f99   Jens Axboe   cfq-iosched: add ...
750
  	rb_erase_init(n, &root->rb);
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
751
  	--root->count;
cc09e2990   Jens Axboe   [PATCH] cfq-iosch...
752
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
753
754
755
  /*
   * would be nice to take fifo expire time into account as well
   */
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
756
757
758
  static struct request *
  cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  		  struct request *last)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
759
  {
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
760
761
  	struct rb_node *rbnext = rb_next(&last->rb_node);
  	struct rb_node *rbprev = rb_prev(&last->rb_node);
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
762
  	struct request *next = NULL, *prev = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
763

21183b07e   Jens Axboe   [PATCH] cfq-iosch...
764
  	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
765
766
  
  	if (rbprev)
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
767
  		prev = rb_entry_rq(rbprev);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
768

21183b07e   Jens Axboe   [PATCH] cfq-iosch...
769
  	if (rbnext)
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
770
  		next = rb_entry_rq(rbnext);
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
771
772
773
  	else {
  		rbnext = rb_first(&cfqq->sort_list);
  		if (rbnext && rbnext != &last->rb_node)
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
774
  			next = rb_entry_rq(rbnext);
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
775
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
776

cf7c25cf9   Corrado Zoccolo   cfq-iosched: fix ...
777
  	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
778
  }
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
779
780
  static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  				      struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
781
  {
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
782
783
784
  	/*
  	 * just an approximation, should be ok.
  	 */
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
785
  	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
464191c65   Jens Axboe   Revert "cfq: Make...
786
  		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
787
  }
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
/*
 * Sort key of a group on the service tree: its vdisktime relative to the
 * tree's minimum, so comparisons stay correct across wraparound.
 */
static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}
  
/*
 * Insert @cfqg into the group service tree @st ordered by cfqg_key(),
 * maintaining the cached leftmost node.
 */
static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1; /* remains 1 only if we never descend right */

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	/* New leftmost entry: refresh the cache used by cfq_rb_first_group() */
	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}
  
  static void
  cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  	struct cfq_group *__cfqg;
  	struct rb_node *n;
  
  	cfqg->nr_cfqq++;
  	if (cfqg->on_st)
  		return;
  
  	/*
  	 * Currently put the group at the end. Later implement something
  	 * so that groups get lesser vtime based on their weights, so that
  	 * if group does not loose all if it was not continously backlogged.
  	 */
  	n = rb_last(&st->rb);
  	if (n) {
  		__cfqg = rb_entry_cfqg(n);
  		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
  	} else
  		cfqg->vdisktime = st->min_vdisktime;
  
  	__cfq_group_service_tree_add(st, cfqg);
  	cfqg->on_st = true;
58ff82f34   Vivek Goyal   blkio: Implement ...
847
  	st->total_weight += cfqg->weight;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
848
849
850
851
852
853
  }
  
  static void
  cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b077   Vivek Goyal   blkio: Introduce ...
854
855
  	if (st->active == &cfqg->rb_node)
  		st->active = NULL;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
856
857
  	BUG_ON(cfqg->nr_cfqq < 1);
  	cfqg->nr_cfqq--;
25bc6b077   Vivek Goyal   blkio: Introduce ...
858

1fa8f6d68   Vivek Goyal   blkio: Introduce ...
859
860
861
  	/* If there are other cfq queues under this group, don't delete it */
  	if (cfqg->nr_cfqq)
  		return;
2868ef7b3   Vivek Goyal   blkio: Some debug...
862
  	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
863
  	cfqg->on_st = false;
58ff82f34   Vivek Goyal   blkio: Implement ...
864
  	st->total_weight -= cfqg->weight;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
865
866
  	if (!RB_EMPTY_NODE(&cfqg->rb_node))
  		cfq_rb_erase(&cfqg->rb_node, st);
dae739ebc   Vivek Goyal   blkio: Group time...
867
  	cfqg->saved_workload_slice = 0;
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
868
  	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
dae739ebc   Vivek Goyal   blkio: Group time...
869
870
871
872
  }
  
  static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
  {
f75edf2dc   Vivek Goyal   blkio: Wait for c...
873
  	unsigned int slice_used;
dae739ebc   Vivek Goyal   blkio: Group time...
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
  
  	/*
  	 * Queue got expired before even a single request completed or
  	 * got expired immediately after first request completion.
  	 */
  	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
  		/*
  		 * Also charge the seek time incurred to the group, otherwise
  		 * if there are mutiple queues in the group, each can dispatch
  		 * a single request on seeky media and cause lots of seek time
  		 * and group will never know it.
  		 */
  		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
  					1);
  	} else {
  		slice_used = jiffies - cfqq->slice_start;
f75edf2dc   Vivek Goyal   blkio: Wait for c...
890
891
  		if (slice_used > cfqq->allocated_slice)
  			slice_used = cfqq->allocated_slice;
dae739ebc   Vivek Goyal   blkio: Group time...
892
  	}
dae739ebc   Vivek Goyal   blkio: Group time...
893
894
895
896
  	return slice_used;
  }
  
  static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
897
  				struct cfq_queue *cfqq)
dae739ebc   Vivek Goyal   blkio: Group time...
898
899
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
02b35081f   Vivek Goyal   cfq-iosched: Do g...
900
  	unsigned int used_sl, charge;
f26bd1f0a   Vivek Goyal   blkio: Determine ...
901
902
903
904
  	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
  			- cfqg->service_tree_idle.count;
  
  	BUG_ON(nr_sync < 0);
02b35081f   Vivek Goyal   cfq-iosched: Do g...
905
  	used_sl = charge = cfq_cfqq_slice_usage(cfqq);
dae739ebc   Vivek Goyal   blkio: Group time...
906

02b35081f   Vivek Goyal   cfq-iosched: Do g...
907
908
909
910
  	if (iops_mode(cfqd))
  		charge = cfqq->slice_dispatch;
  	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
  		charge = cfqq->allocated_slice;
dae739ebc   Vivek Goyal   blkio: Group time...
911
912
913
  
  	/* Can't update vdisktime while group is on service tree */
  	cfq_rb_erase(&cfqg->rb_node, st);
02b35081f   Vivek Goyal   cfq-iosched: Do g...
914
  	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
dae739ebc   Vivek Goyal   blkio: Group time...
915
916
917
918
919
920
921
922
923
924
  	__cfq_group_service_tree_add(st, cfqg);
  
  	/* This group is being expired. Save the context */
  	if (time_after(cfqd->workload_expires, jiffies)) {
  		cfqg->saved_workload_slice = cfqd->workload_expires
  						- jiffies;
  		cfqg->saved_workload = cfqd->serving_type;
  		cfqg->saved_serving_prio = cfqd->serving_prio;
  	} else
  		cfqg->saved_workload_slice = 0;
2868ef7b3   Vivek Goyal   blkio: Some debug...
925
926
927
  
  	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
  					st->min_vdisktime);
c4e7893eb   Vivek Goyal   cfq-iosched: blkt...
928
929
930
  	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
  			" sect=%u", used_sl, cfqq->slice_dispatch, charge,
  			iops_mode(cfqd), cfqq->nr_sectors);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
931
932
  	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
  	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
933
  }
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
934
935
936
937
938
939
940
#ifdef CONFIG_CFQ_GROUP_IOSCHED
/* Map a blkio_group back to its containing cfq_group; NULL-safe. */
static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct cfq_group, blkg);
	return NULL;
}
fe0714377   Vivek Goyal   blkio: Recalculat...
941
942
  void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
  					unsigned int weight)
f8d461d69   Vivek Goyal   blkio: Propagate ...
943
944
945
  {
  	cfqg_of_blkg(blkg)->weight = weight;
  }
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
946
947
948
949
950
951
952
953
  static struct cfq_group *
  cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
  {
  	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
  	struct cfq_group *cfqg = NULL;
  	void *key = cfqd;
  	int i, j;
  	struct cfq_rb_root *st;
220841906   Vivek Goyal   blkio: Export dis...
954
955
  	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
  	unsigned int major, minor;
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
956

25fb5169d   Vivek Goyal   blkio: Dynamic cf...
957
  	cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
a74b2adae   Ricky Benitez   block: expose the...
958
959
960
961
962
  	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
  		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
  		cfqg->blkg.dev = MKDEV(major, minor);
  		goto done;
  	}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
963
964
965
966
967
968
  	if (cfqg || !create)
  		goto done;
  
  	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
  	if (!cfqg)
  		goto done;
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
969
970
971
  	for_each_cfqg_st(cfqg, i, j, st)
  		*st = CFQ_RB_ROOT;
  	RB_CLEAR_NODE(&cfqg->rb_node);
b1c357696   Vivek Goyal   blkio: Take care ...
972
973
974
975
976
977
978
  	/*
  	 * Take the initial reference that will be released on destroy
  	 * This can be thought of a joint reference by cgroup and
  	 * elevator which will be dropped by either elevator exit
  	 * or cgroup deletion path depending on who is exiting first.
  	 */
  	atomic_set(&cfqg->ref, 1);
180be2a04   Vivek Goyal   cfq-iosched: fix ...
979
980
981
982
983
984
985
986
987
  	/*
  	 * Add group onto cgroup list. It might happen that bdi->dev is
  	 * not initiliazed yet. Initialize this new group without major
  	 * and minor info and this info will be filled in once a new thread
  	 * comes for IO. See code above.
  	 */
  	if (bdi->dev) {
  		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
  		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
220841906   Vivek Goyal   blkio: Export dis...
988
  					MKDEV(major, minor));
180be2a04   Vivek Goyal   cfq-iosched: fix ...
989
990
991
  	} else
  		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
  					0);
34d0f179d   Gui Jianfeng   io-controller: Ad...
992
  	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
993
994
995
996
997
  
  	/* Add group on cfqd list */
  	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
  
  done:
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
  	return cfqg;
  }
  
/*
 * Search for the cfq group current task belongs to. If create = 1, then also
 * create the cfq group if it does not exist. request_queue lock must be held.
 */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
	struct cgroup *cgroup;
	struct cfq_group *cfqg = NULL;

	rcu_read_lock();
	/* task->cgroup association is only stable under RCU. */
	cgroup = task_cgroup(current, blkio_subsys_id);
	cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
	/* Allocation can fail (GFP_ATOMIC): fall back to the root group. */
	if (!cfqg && create)
		cfqg = &cfqd->root_group;
	rcu_read_unlock();
	return cfqg;
}
7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
1018
1019
1020
1021
1022
/* Take an extra reference on @cfqg; dropped via cfq_put_cfqg(). */
static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	atomic_inc(&cfqg->ref);
	return cfqg;
}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1023
1024
1025
1026
1027
1028
1029
  static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
  {
  	/* Currently, all async queues are mapped to root group */
  	if (!cfq_cfqq_sync(cfqq))
  		cfqg = &cfqq->cfqd->root_group;
  
  	cfqq->cfqg = cfqg;
b1c357696   Vivek Goyal   blkio: Take care ...
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
  	/* cfqq reference on cfqg */
  	atomic_inc(&cfqq->cfqg->ref);
  }
  
/*
 * Drop a reference on @cfqg and free it when the last one goes away.
 * By then every service tree must be empty and none may be active.
 */
static void cfq_put_cfqg(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	BUG_ON(atomic_read(&cfqg->ref) <= 0);
	if (!atomic_dec_and_test(&cfqg->ref))
		return;
	for_each_cfqg_st(cfqg, i, j, st)
		BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
	kfree(cfqg);
}
  
/* Unlink @cfqg from cfqd's group list and drop the creation reference. */
static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));

	hlist_del_init(&cfqg->cfqd_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	cfq_put_cfqg(cfqg);
}
  
  static void cfq_release_cfq_groups(struct cfq_data *cfqd)
  {
  	struct hlist_node *pos, *n;
  	struct cfq_group *cfqg;
  
  	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
  		/*
  		 * If cgroup removal path got to blk_group first and removed
  		 * it from cgroup list, then it will take care of destroying
  		 * cfqg also.
  		 */
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1072
  		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
b1c357696   Vivek Goyal   blkio: Take care ...
1073
1074
  			cfq_destroy_cfqg(cfqd, cfqg);
  	}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1075
  }
b1c357696   Vivek Goyal   blkio: Take care ...
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
  
/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
 * read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if elevator was exiting, cgroup deletion
 * path got to it first.
 */
void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long  flags;
	struct cfq_data *cfqd = key;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1100
1101
1102
1103
1104
  #else /* GROUP_IOSCHED */
  static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
  {
  	return &cfqd->root_group;
  }
7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
1105
1106
1107
  
  static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
  {
50eaeb323   Dmitry Monakhov   cfq-iosched: fix ...
1108
  	return cfqg;
7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
1109
  }
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1110
1111
1112
1113
  static inline void
  cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
  	cfqq->cfqg = cfqg;
  }
b1c357696   Vivek Goyal   blkio: Take care ...
1114
1115
  static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
  static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1116
  #endif /* GROUP_IOSCHED */
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
1117
/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;
	int group_changed = 0;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/*
	 * Without group isolation, seeky (SYNC_NOIDLE) queues are served
	 * from the root group, and a queue that has turned sequential
	 * again migrates back to its original group.
	 */
	if (!cfqd->cfq_group_isolation
	    && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
	    && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
		/* Move this cfq to root group */
		cfq_log_cfqq(cfqd, cfqq, "moving to root group");
		if (!RB_EMPTY_NODE(&cfqq->rb_node))
			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
		cfqq->orig_cfqg = cfqq->cfqg;
		cfqq->cfqg = &cfqd->root_group;
		atomic_inc(&cfqd->root_group.ref);
		group_changed = 1;
	} else if (!cfqd->cfq_group_isolation
		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
		/* cfqq is sequential now needs to go to its original group */
		BUG_ON(cfqq->cfqg != &cfqd->root_group);
		if (!RB_EMPTY_NODE(&cfqq->rb_node))
			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
		cfq_put_cfqg(cfqq->cfqg);
		cfqq->cfqg = cfqq->orig_cfqg;
		cfqq->orig_cfqg = NULL;
		group_changed = 1;
		cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
	}
#endif

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		/* Idle class: always queue behind the current last entry. */
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		/* Front insertion: key strictly below the current leftmost. */
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		/* Re-queueing: remove from the old tree before re-insert. */
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}
	/* New leftmost entry: refresh the cache used by cfq_rb_first(). */
	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	/* Only a newly busy queue (and no group change) affects the group. */
	if ((add_front || !new_cfqq) && !group_changed)
		return;
	cfq_group_service_tree_add(cfqd, cfqq->cfqg);
}
/*
 * Search @root (a per-priority tree of cfq_queues sorted by the sector of
 * their next_rq) for a queue whose next request starts exactly at @sector.
 *
 * Returns the matching cfq_queue, or NULL if there is no exact match.
 * *@ret_parent is set to the last node visited (the would-be parent of a
 * newly inserted node) and, when @rb_link is non-NULL, *@rb_link to the
 * link pointer where such a node would be attached.
 */
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector.  Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		/* only an exact sector match is reported as a hit */
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}
  
/*
 * (Re)insert @cfqq into the prio tree matching its org_ioprio, keyed by
 * the sector of its next_rq.  Idle-class queues and queues with no
 * pending request are kept out of the trees.  On a sector collision the
 * queue is simply left out (p_root stays NULL).
 */
static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	/* drop any stale tree position first */
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;	/* another queue already holds this sector */
}
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		/* keep the sector-sorted prio tree in sync as well */
		cfq_prio_tree_add(cfqd, cfqq);
	}
}
/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));	/* must not already be on the RR list */
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	/* place the queue in the service and prio trees */
	cfq_resort_rr_list(cfqd, cfqq);
}
/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	/* unlink from the service tree, if currently queued there */
	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	/* ... and from the sector-sorted prio tree */
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_service_tree_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}
  
/*
 * rb tree support functions
 */

/*
 * Remove @rq from its cfqq's sort_list and drop the per-direction queued
 * count.  If that leaves an on-RR queue empty, take it out of the prio
 * tree right away; service-tree removal is deferred until expiry.
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}
/*
 * Insert @rq into its cfqq's sort_list, putting the queue on the RR list
 * if it is not there yet, and re-evaluating the queue's cached next_rq
 * candidate (fixing the prio-tree position when it changes).
 */
static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias, *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}
/*
 * Re-insert @rq into cfqq's sort_list after its start sector changed
 * (front merge), keeping the blkio cgroup accounting in step: stats are
 * removed for the old position and re-added for the new one.
 */
static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
}
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1404
1405
  static struct request *
  cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1406
  {
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1407
  	struct task_struct *tsk = current;
91fac317a   Vasily Tarasov   cfq-iosched: get ...
1408
  	struct cfq_io_context *cic;
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1409
  	struct cfq_queue *cfqq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1410

4ac845a2e   Jens Axboe   block: cfq: make ...
1411
  	cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
1412
1413
1414
1415
  	if (!cic)
  		return NULL;
  
  	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
1416
1417
  	if (cfqq) {
  		sector_t sector = bio->bi_sector + bio_sectors(bio);
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
1418
  		return elv_rb_find(&cfqq->sort_list, sector);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
1419
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1420

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1421
1422
  	return NULL;
  }
/*
 * Elevator hook: a request was handed to the driver.  Track the in-driver
 * count and remember where the head will end up (last_position).
 */
static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	/* sector just past this request: head position after completion */
	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
/*
 * Elevator hook: a request was taken back from the driver (requeued);
 * undo the in-driver accounting done by cfq_activate_request().
 */
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}
/*
 * Fully remove @rq from the scheduler: fix up the queue's next_rq hint,
 * unlink it from the fifo list and the sort tree, and update the
 * queued-request, blkio and pending-metadata accounting.
 */
static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	/* don't leave next_rq dangling on the request being removed */
	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	if (rq->cmd_flags & REQ_META) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1459
1460
  static int cfq_merge(struct request_queue *q, struct request **req,
  		     struct bio *bio)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1461
1462
1463
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	struct request *__rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1464

206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1465
  	__rq = cfq_find_rq_fmerge(cfqd, bio);
22e2c507c   Jens Axboe   [PATCH] Update cf...
1466
  	if (__rq && elv_rq_merge_ok(__rq, bio)) {
9817064b6   Jens Axboe   [PATCH] elevator:...
1467
1468
  		*req = __rq;
  		return ELEVATOR_FRONT_MERGE;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1469
1470
1471
  	}
  
  	return ELEVATOR_NO_MERGE;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1472
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1473
  static void cfq_merged_request(struct request_queue *q, struct request *req,
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
1474
  			       int type)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1475
  {
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
1476
  	if (type == ELEVATOR_FRONT_MERGE) {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1477
  		struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1478

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1479
  		cfq_reposition_rq_rb(cfqq, req);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1480
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1481
  }
/*
 * Elevator hook: @bio was merged into @req; account the merge in the
 * blkio cgroup stats of the request's group.
 */
static void cfq_bio_merged(struct request_queue *q, struct request *req,
				struct bio *bio)
{
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
					bio_data_dir(bio), cfq_bio_sync(bio));
}
/*
 * Elevator hook: @next has been merged into @rq.  Keep fifo ordering
 * correct, fix up the queue's next_rq hint, and drop @next from the
 * scheduler with the matching blkio stats update.
 */
static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	/* the surviving request inherits the next-serve slot */
	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(next), rq_is_sync(next));
}
/*
 * Elevator hook: decide whether @bio may be merged into @rq.  Sync bios
 * never merge into async requests, and the merge is only allowed when
 * @bio would be queued on the very cfqq that @rq belongs to.
 */
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}
/*
 * Cancel the idle-slice timer and record the end of the idling period in
 * the blkio cgroup stats of the queue's group.
 */
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}
/*
 * Make @cfqq (may be NULL) the active queue.  A non-NULL queue gets its
 * per-slice state reset: slice timing and accounting are cleared, the
 * slice_new flag is set, and any pending idle timer is cancelled.
 */
static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
		/* reset slice timing/accounting for the fresh slice */
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}
  
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance.  If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	/* charge the used slice to the queue's group */
	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	/*
	 * An empty queue leaves the RR list here; cfq_resort_rr_list() is
	 * a no-op for queues no longer on it.
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
		cfqd->grp_service_tree.active = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
1609
  static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1610
1611
1612
1613
  {
  	struct cfq_queue *cfqq = cfqd->active_queue;
  
  	if (cfqq)
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
1614
  		__cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1615
  }
/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *service_tree =
		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
					cfqd->serving_type);

	/* nothing queued anywhere */
	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!service_tree)
		return NULL;
	if (RB_EMPTY_ROOT(&service_tree->rb))
		return NULL;
	return cfq_rb_first(service_tree);
}
/*
 * Forced variant of cfq_get_next_queue(): ignore the current serving
 * group/prio/type selection and return the first queue found on any
 * service tree of the next group, or NULL when nothing is queued.
 */
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	/* scan every service tree in the group for a queued cfqq */
	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}
/*
 * Get and set a new active queue for service.  When @cfqq is NULL, the
 * next queue is picked from the service tree.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	struct cfq_queue *chosen = cfqq ? cfqq : cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, chosen);
	return chosen;
}
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1664
1665
1666
  static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
  					  struct request *rq)
  {
83096ebf1   Tejun Heo   block: convert to...
1667
1668
  	if (blk_rq_pos(rq) >= cfqd->last_position)
  		return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1669
  	else
83096ebf1   Tejun Heo   block: convert to...
1670
  		return cfqd->last_position - blk_rq_pos(rq);
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
1671
  }
/*
 * True if @rq starts within CFQQ_CLOSE_THR sectors of the last head
 * position, i.e. close enough to serve without a costly seek.
 */
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}
/*
 * Find another queue in @cur_cfqq's prio tree whose next request is close
 * to the current head position: an exact sector match if one exists,
 * otherwise one of the two tree neighbours, provided it passes
 * cfq_rq_close().  Returns NULL when nothing nearby is found.
 */
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	/* try the neighbour on the other side of the head position */
	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}
  
/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 * 	      closely cooperating with itself.
 *
 * So, basically we're assuming that that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	/* idle-class, async or seeky queues never get a cooperator */
	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}
a6d44e982   Corrado Zoccolo   cfq-iosched: enab...
1769
1770
1771
1772
1773
1774
1775
  /*
   * Determine whether we should enforce idle window for this queue.
   */
  
  static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	enum wl_prio_t prio = cfqq_prio(cfqq);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
1776
  	struct cfq_rb_root *service_tree = cfqq->service_tree;
a6d44e982   Corrado Zoccolo   cfq-iosched: enab...
1777

f04a64246   Vivek Goyal   blkio: Keep queue...
1778
1779
  	BUG_ON(!service_tree);
  	BUG_ON(!service_tree->count);
b6508c161   Vivek Goyal   cfq-iosched: Do n...
1780
1781
  	if (!cfqd->cfq_slice_idle)
  		return false;
a6d44e982   Corrado Zoccolo   cfq-iosched: enab...
1782
1783
1784
1785
1786
  	/* We never do for idle class queues. */
  	if (prio == IDLE_WORKLOAD)
  		return false;
  
  	/* We do for queues that were marked with idle window flag. */
3c764b7a6   Shaohua Li   cfq-iosched: make...
1787
1788
  	if (cfq_cfqq_idle_window(cfqq) &&
  	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
a6d44e982   Corrado Zoccolo   cfq-iosched: enab...
1789
1790
1791
1792
1793
1794
  		return true;
  
  	/*
  	 * Otherwise, we do only if they are the last ones
  	 * in their service tree.
  	 */
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
1795
1796
1797
1798
1799
  	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
  		return 1;
  	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
  			service_tree->count);
  	return 0;
a6d44e982   Corrado Zoccolo   cfq-iosched: enab...
1800
  }
6d048f531   Jens Axboe   cfq-iosched: deve...
1801
  static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507c   Jens Axboe   [PATCH] Update cf...
1802
  {
1792669cc   Jens Axboe   cfq-iosched: don'...
1803
  	struct cfq_queue *cfqq = cfqd->active_queue;
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1804
  	struct cfq_io_context *cic;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1805
  	unsigned long sl, group_idle = 0;
7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1806

a68bbddba   Jens Axboe   block: add queue ...
1807
  	/*
f7d7b7a7a   Jens Axboe   block: as/cfq ssd...
1808
1809
1810
  	 * SSD device without seek penalty, disable idling. But only do so
  	 * for devices that support queuing, otherwise we still have a problem
  	 * with sync vs async workloads.
a68bbddba   Jens Axboe   block: add queue ...
1811
  	 */
f7d7b7a7a   Jens Axboe   block: as/cfq ssd...
1812
  	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddba   Jens Axboe   block: add queue ...
1813
  		return;
dd67d0515   Jens Axboe   [PATCH] rbtree: s...
1814
  	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f531   Jens Axboe   cfq-iosched: deve...
1815
  	WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507c   Jens Axboe   [PATCH] Update cf...
1816
1817
1818
1819
  
  	/*
  	 * idle is disabled, either manually or by past process history
  	 */
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1820
1821
1822
1823
1824
1825
1826
  	if (!cfq_should_idle(cfqd, cfqq)) {
  		/* no queue idling. Check for group idling */
  		if (cfqd->cfq_group_idle)
  			group_idle = cfqd->cfq_group_idle;
  		else
  			return;
  	}
6d048f531   Jens Axboe   cfq-iosched: deve...
1827

22e2c507c   Jens Axboe   [PATCH] Update cf...
1828
  	/*
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
1829
  	 * still active requests from this queue, don't idle
7b679138b   Jens Axboe   cfq-iosched: add ...
1830
  	 */
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
1831
  	if (cfqq->dispatched)
7b679138b   Jens Axboe   cfq-iosched: add ...
1832
1833
1834
  		return;
  
  	/*
22e2c507c   Jens Axboe   [PATCH] Update cf...
1835
1836
  	 * task has exited, don't wait
  	 */
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1837
  	cic = cfqd->active_cic;
66dac98ed   Nikanth Karthikesan   io_context sharin...
1838
  	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
6d048f531   Jens Axboe   cfq-iosched: deve...
1839
  		return;
355b659c8   Corrado Zoccolo   cfq-iosched: avoi...
1840
1841
1842
1843
1844
1845
  	/*
  	 * If our average think time is larger than the remaining time
  	 * slice, then don't idle. This avoids overrunning the allotted
  	 * time slice.
  	 */
  	if (sample_valid(cic->ttime_samples) &&
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
1846
1847
1848
  	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
  		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
  				cic->ttime_mean);
355b659c8   Corrado Zoccolo   cfq-iosched: avoi...
1849
  		return;
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
1850
  	}
355b659c8   Corrado Zoccolo   cfq-iosched: avoi...
1851

80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1852
1853
1854
  	/* There are other queues in the group, don't do group idle */
  	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
  		return;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
1855
  	cfq_mark_cfqq_wait_request(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
1856

80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1857
1858
1859
1860
  	if (group_idle)
  		sl = cfqd->cfq_group_idle;
  	else
  		sl = cfqd->cfq_slice_idle;
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
1861

7b14e3b52   Jens Axboe   [PATCH] cfq-iosch...
1862
  	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1863
  	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1864
1865
  	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
  			group_idle ? 1 : 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1866
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
1867
1868
1869
  /*
   * Move request from internal lists to the request queue dispatch list.
   */
165125e1e   Jens Axboe   [BLOCK] Get rid o...
1870
  static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1871
  {
3ed9a2965   Jens Axboe   cfq-iosched: impr...
1872
  	struct cfq_data *cfqd = q->elevator->elevator_data;
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
1873
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
1874

7b679138b   Jens Axboe   cfq-iosched: add ...
1875
  	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
06d218864   Jeff Moyer   cfq: choose a new...
1876
  	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101d   Jens Axboe   [PATCH] cfq-iosch...
1877
  	cfq_remove_request(rq);
6d048f531   Jens Axboe   cfq-iosched: deve...
1878
  	cfqq->dispatched++;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
1879
  	(RQ_CFQG(rq))->dispatched++;
5380a101d   Jens Axboe   [PATCH] cfq-iosch...
1880
  	elv_dispatch_sort(q, rq);
3ed9a2965   Jens Axboe   cfq-iosched: impr...
1881

53c583d22   Corrado Zoccolo   cfq-iosched: requ...
1882
  	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893eb   Vivek Goyal   cfq-iosched: blkt...
1883
  	cfqq->nr_sectors += blk_rq_sectors(rq);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
1884
  	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
84c124da9   Divyesh Shah   blkio: Changes to...
1885
  					rq_data_dir(rq), rq_is_sync(rq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1886
1887
1888
1889
1890
  }
  
  /*
   * return expired entry, or NULL to just start from scratch in rbtree
   */
febffd618   Jens Axboe   cfq-iosched: kill...
1891
  static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1892
  {
30996f40b   Jens Axboe   cfq-iosched: fix ...
1893
  	struct request *rq = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1894

3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
1895
  	if (cfq_cfqq_fifo_expire(cfqq))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1896
  		return NULL;
cb8874119   Jens Axboe   cfq-iosched: twea...
1897
1898
  
  	cfq_mark_cfqq_fifo_expire(cfqq);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
1899
1900
  	if (list_empty(&cfqq->fifo))
  		return NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1901

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
1902
  	rq = rq_entry_fifo(cfqq->fifo.next);
30996f40b   Jens Axboe   cfq-iosched: fix ...
1903
  	if (time_before(jiffies, rq_fifo_time(rq)))
7b679138b   Jens Axboe   cfq-iosched: add ...
1904
  		rq = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1905

30996f40b   Jens Axboe   cfq-iosched: fix ...
1906
  	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
6d048f531   Jens Axboe   cfq-iosched: deve...
1907
  	return rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1908
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
1909
1910
1911
1912
  static inline int
  cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1913

22e2c507c   Jens Axboe   [PATCH] Update cf...
1914
  	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1915

22e2c507c   Jens Axboe   [PATCH] Update cf...
1916
  	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1917
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
1918
  /*
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
   * Must be called with the queue_lock held.
   */
  static int cfqq_process_refs(struct cfq_queue *cfqq)
  {
  	int process_refs, io_refs;
  
  	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
  	process_refs = atomic_read(&cfqq->ref) - io_refs;
  	BUG_ON(process_refs < 0);
  	return process_refs;
  }
  
  static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
  {
e6c5bc737   Jeff Moyer   cfq: break apart ...
1933
  	int process_refs, new_process_refs;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
1934
  	struct cfq_queue *__cfqq;
c10b61f09   Jeff Moyer   cfq: Don't allow ...
1935
1936
1937
1938
1939
1940
1941
1942
  	/*
  	 * If there are no process references on the new_cfqq, then it is
  	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
  	 * chain may have dropped their last reference (not just their
  	 * last process reference).
  	 */
  	if (!cfqq_process_refs(new_cfqq))
  		return;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
1943
1944
1945
1946
1947
1948
1949
1950
  	/* Avoid a circular list and skip interim queue merges */
  	while ((__cfqq = new_cfqq->new_cfqq)) {
  		if (__cfqq == cfqq)
  			return;
  		new_cfqq = __cfqq;
  	}
  
  	process_refs = cfqq_process_refs(cfqq);
c10b61f09   Jeff Moyer   cfq: Don't allow ...
1951
  	new_process_refs = cfqq_process_refs(new_cfqq);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
1952
1953
1954
1955
  	/*
  	 * If the process for the cfqq has gone away, there is no
  	 * sense in merging the queues.
  	 */
c10b61f09   Jeff Moyer   cfq: Don't allow ...
1956
  	if (process_refs == 0 || new_process_refs == 0)
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
1957
  		return;
e6c5bc737   Jeff Moyer   cfq: break apart ...
1958
1959
1960
  	/*
  	 * Merge in the direction of the lesser amount of work.
  	 */
e6c5bc737   Jeff Moyer   cfq: break apart ...
1961
1962
1963
1964
1965
1966
1967
  	if (new_process_refs >= process_refs) {
  		cfqq->new_cfqq = new_cfqq;
  		atomic_add(process_refs, &new_cfqq->ref);
  	} else {
  		new_cfqq->new_cfqq = cfqq;
  		atomic_add(new_process_refs, &cfqq->ref);
  	}
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
1968
  }
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
1969
  static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
65b32a573   Vivek Goyal   cfq-iosched: Remo...
1970
  				struct cfq_group *cfqg, enum wl_prio_t prio)
718eee057   Corrado Zoccolo   cfq-iosched: fair...
1971
1972
1973
1974
1975
1976
  {
  	struct cfq_queue *queue;
  	int i;
  	bool key_valid = false;
  	unsigned long lowest_key = 0;
  	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
65b32a573   Vivek Goyal   cfq-iosched: Remo...
1977
1978
1979
  	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
  		/* select the one with lowest rb_key */
  		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
718eee057   Corrado Zoccolo   cfq-iosched: fair...
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
  		if (queue &&
  		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
  			lowest_key = queue->rb_key;
  			cur_best = i;
  			key_valid = true;
  		}
  	}
  
  	return cur_best;
  }
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
1990
  static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee057   Corrado Zoccolo   cfq-iosched: fair...
1991
  {
718eee057   Corrado Zoccolo   cfq-iosched: fair...
1992
1993
  	unsigned slice;
  	unsigned count;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
1994
  	struct cfq_rb_root *st;
58ff82f34   Vivek Goyal   blkio: Implement ...
1995
  	unsigned group_slice;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
1996

1fa8f6d68   Vivek Goyal   blkio: Introduce ...
1997
1998
1999
2000
2001
  	if (!cfqg) {
  		cfqd->serving_prio = IDLE_WORKLOAD;
  		cfqd->workload_expires = jiffies + 1;
  		return;
  	}
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2002
  	/* Choose next priority. RT > BE > IDLE */
58ff82f34   Vivek Goyal   blkio: Implement ...
2003
  	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2004
  		cfqd->serving_prio = RT_WORKLOAD;
58ff82f34   Vivek Goyal   blkio: Implement ...
2005
  	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
  		cfqd->serving_prio = BE_WORKLOAD;
  	else {
  		cfqd->serving_prio = IDLE_WORKLOAD;
  		cfqd->workload_expires = jiffies + 1;
  		return;
  	}
  
  	/*
  	 * For RT and BE, we have to choose also the type
  	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
  	 * expiration time
  	 */
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2018
  	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2019
  	count = st->count;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2020
2021
  
  	/*
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2022
  	 * check workload expiration, and that we still have other queues ready
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2023
  	 */
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2024
  	if (count && !time_after(jiffies, cfqd->workload_expires))
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2025
2026
2027
2028
  		return;
  
  	/* otherwise select new workload type */
  	cfqd->serving_type =
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2029
2030
  		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
  	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2031
  	count = st->count;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2032
2033
2034
2035
2036
2037
  
  	/*
  	 * the workload slice is computed as a fraction of target latency
  	 * proportional to the number of queues in that workload, over
  	 * all the queues in the same priority class
  	 */
58ff82f34   Vivek Goyal   blkio: Implement ...
2038
2039
2040
2041
2042
  	group_slice = cfq_group_slice(cfqd, cfqg);
  
  	slice = group_slice * count /
  		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
  		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2043

f26bd1f0a   Vivek Goyal   blkio: Determine ...
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
  	if (cfqd->serving_type == ASYNC_WORKLOAD) {
  		unsigned int tmp;
  
  		/*
  		 * Async queues are currently system wide. Just taking
  		 * proportion of queues with-in same group will lead to higher
  		 * async ratio system wide as generally root group is going
  		 * to have higher weight. A more accurate thing would be to
  		 * calculate system wide asnc/sync ratio.
  		 */
  		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
  		tmp = tmp/cfqd->busy_queues;
  		slice = min_t(unsigned, slice, tmp);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2057
2058
2059
  		/* async workload slice is scaled down according to
  		 * the sync/async slice ratio. */
  		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
f26bd1f0a   Vivek Goyal   blkio: Determine ...
2060
  	} else
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2061
2062
2063
2064
  		/* sync workload slice is at least 2 * cfq_slice_idle */
  		slice = max(slice, 2 * cfqd->cfq_slice_idle);
  
  	slice = max_t(unsigned, slice, CFQ_MIN_TT);
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
2065
  	cfq_log(cfqd, "workload slice:%d", slice);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2066
2067
  	cfqd->workload_expires = jiffies + slice;
  }
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2068
2069
2070
  static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b077   Vivek Goyal   blkio: Introduce ...
2071
  	struct cfq_group *cfqg;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2072
2073
2074
  
  	if (RB_EMPTY_ROOT(&st->rb))
  		return NULL;
25bc6b077   Vivek Goyal   blkio: Introduce ...
2075
2076
2077
2078
  	cfqg = cfq_rb_first_group(st);
  	st->active = &cfqg->rb_node;
  	update_min_vdisktime(st);
  	return cfqg;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2079
  }
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2080
2081
  static void cfq_choose_cfqg(struct cfq_data *cfqd)
  {
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2082
2083
2084
  	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
  
  	cfqd->serving_group = cfqg;
dae739ebc   Vivek Goyal   blkio: Group time...
2085
2086
2087
2088
2089
2090
  
  	/* Restore the workload type data */
  	if (cfqg->saved_workload_slice) {
  		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
  		cfqd->serving_type = cfqg->saved_workload;
  		cfqd->serving_prio = cfqg->saved_serving_prio;
66ae29197   Gui Jianfeng   cfq: set workload...
2091
2092
  	} else
  		cfqd->workload_expires = jiffies - 1;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2093
  	choose_service_tree(cfqd, cfqg);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2094
  }
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2095
  /*
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2096
2097
   * Select a queue for service. If we have a current active queue,
   * check whether to continue servicing it, or retrieve and set a new one.
22e2c507c   Jens Axboe   [PATCH] Update cf...
2098
   */
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2099
  static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2100
  {
a36e71f99   Jens Axboe   cfq-iosched: add ...
2101
  	struct cfq_queue *cfqq, *new_cfqq = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2102

22e2c507c   Jens Axboe   [PATCH] Update cf...
2103
2104
2105
  	cfqq = cfqd->active_queue;
  	if (!cfqq)
  		goto new_queue;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2106

f04a64246   Vivek Goyal   blkio: Keep queue...
2107
2108
  	if (!cfqd->rq_queued)
  		return NULL;
c244bb50a   Vivek Goyal   cfq-iosched: Get ...
2109
2110
2111
2112
2113
2114
  
  	/*
  	 * We were waiting for group to get backlogged. Expire the queue
  	 */
  	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
  		goto expire;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2115
  	/*
6d048f531   Jens Axboe   cfq-iosched: deve...
2116
  	 * The active queue has run out of time, expire it and select new.
22e2c507c   Jens Axboe   [PATCH] Update cf...
2117
  	 */
7667aa063   Vivek Goyal   cfq-iosched: Take...
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
  	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
  		/*
  		 * If slice had not expired at the completion of last request
  		 * we might not have turned on wait_busy flag. Don't expire
  		 * the queue yet. Allow the group to get backlogged.
  		 *
  		 * The very fact that we have used the slice, that means we
  		 * have been idling all along on this queue and it should be
  		 * ok to wait for this request to complete.
  		 */
82bbbf28d   Vivek Goyal   Fix a CFQ crash i...
2128
2129
2130
  		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
  		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
  			cfqq = NULL;
7667aa063   Vivek Goyal   cfq-iosched: Take...
2131
  			goto keep_queue;
82bbbf28d   Vivek Goyal   Fix a CFQ crash i...
2132
  		} else
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
2133
  			goto check_group_idle;
7667aa063   Vivek Goyal   cfq-iosched: Take...
2134
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2135

22e2c507c   Jens Axboe   [PATCH] Update cf...
2136
  	/*
6d048f531   Jens Axboe   cfq-iosched: deve...
2137
2138
  	 * The active queue has requests and isn't expired, allow it to
  	 * dispatch.
22e2c507c   Jens Axboe   [PATCH] Update cf...
2139
  	 */
dd67d0515   Jens Axboe   [PATCH] rbtree: s...
2140
  	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507c   Jens Axboe   [PATCH] Update cf...
2141
  		goto keep_queue;
6d048f531   Jens Axboe   cfq-iosched: deve...
2142
2143
  
  	/*
a36e71f99   Jens Axboe   cfq-iosched: add ...
2144
2145
2146
  	 * If another queue has a request waiting within our mean seek
  	 * distance, let it run.  The expire code will check for close
  	 * cooperators and put the close queue at the front of the service
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2147
  	 * tree.  If possible, merge the expiring queue with the new cfqq.
a36e71f99   Jens Axboe   cfq-iosched: add ...
2148
  	 */
b3b6d0408   Jeff Moyer   cfq: change the m...
2149
  	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2150
2151
2152
  	if (new_cfqq) {
  		if (!cfqq->new_cfqq)
  			cfq_setup_merge(cfqq, new_cfqq);
a36e71f99   Jens Axboe   cfq-iosched: add ...
2153
  		goto expire;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2154
  	}
a36e71f99   Jens Axboe   cfq-iosched: add ...
2155
2156
  
  	/*
6d048f531   Jens Axboe   cfq-iosched: deve...
2157
2158
2159
2160
  	 * No requests pending. If the active queue still has requests in
  	 * flight or is idling for a new request, allow either of these
  	 * conditions to happen (or time out) before selecting a new queue.
  	 */
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
  	if (timer_pending(&cfqd->idle_slice_timer)) {
  		cfqq = NULL;
  		goto keep_queue;
  	}
  
  	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
  		cfqq = NULL;
  		goto keep_queue;
  	}
  
  	/*
  	 * If group idle is enabled and there are requests dispatched from
  	 * this group, wait for requests to complete.
  	 */
  check_group_idle:
  	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
  	    && cfqq->cfqg->dispatched) {
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2178
2179
  		cfqq = NULL;
  		goto keep_queue;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2180
  	}
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2181
  expire:
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2182
  	cfq_slice_expired(cfqd, 0);
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2183
  new_queue:
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2184
2185
2186
2187
2188
  	/*
  	 * Current queue expired. Check if we have to switch to a new
  	 * service tree
  	 */
  	if (!new_cfqq)
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2189
  		cfq_choose_cfqg(cfqd);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2190

a36e71f99   Jens Axboe   cfq-iosched: add ...
2191
  	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2192
  keep_queue:
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2193
  	return cfqq;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2194
  }
febffd618   Jens Axboe   cfq-iosched: kill...
2195
  static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2196
2197
2198
2199
2200
2201
2202
2203
2204
  {
  	int dispatched = 0;
  
  	while (cfqq->next_rq) {
  		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
  		dispatched++;
  	}
  
  	BUG_ON(!list_empty(&cfqq->fifo));
f04a64246   Vivek Goyal   blkio: Keep queue...
2205
2206
  
  	/* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2207
  	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2208
2209
  	return dispatched;
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2210
2211
2212
2213
  /*
   * Drain our current requests. Used for barriers and when switching
   * io schedulers on-the-fly.
   */
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2214
  static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2215
  {
0871714e0   Jens Axboe   cfq-iosched: rela...
2216
  	struct cfq_queue *cfqq;
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2217
  	int dispatched = 0;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2218

3440c49f5   Divyesh Shah   cfq-iosched: Fix ...
2219
  	/* Expire the timeslice of the current active queue first */
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2220
  	cfq_slice_expired(cfqd, 0);
3440c49f5   Divyesh Shah   cfq-iosched: Fix ...
2221
2222
  	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
  		__cfq_set_active_queue(cfqd, cfqq);
f04a64246   Vivek Goyal   blkio: Keep queue...
2223
  		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f5   Divyesh Shah   cfq-iosched: Fix ...
2224
  	}
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2225

1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2226
  	BUG_ON(cfqd->busy_queues);
6923715ae   Jeff Moyer   cfq: remove extra...
2227
  	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2228
2229
  	return dispatched;
  }
abc3c744d   Shaohua Li   cfq-iosched: quan...
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
  static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
  	struct cfq_queue *cfqq)
  {
  	/* the queue hasn't finished any request, can't estimate */
  	if (cfq_cfqq_slice_new(cfqq))
  		return 1;
  	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
  		cfqq->slice_end))
  		return 1;
  
  	return 0;
  }
0b182d617   Jens Axboe   cfq-iosched: abst...
2242
  static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2243
  {
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2244
  	unsigned int max_dispatch;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2245

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2246
  	/*
5ad531db6   Jens Axboe   cfq-iosched: drai...
2247
2248
  	 * Drain async requests before we start sync IO
  	 */
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
2249
  	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d617   Jens Axboe   cfq-iosched: abst...
2250
  		return false;
5ad531db6   Jens Axboe   cfq-iosched: drai...
2251
2252
  
  	/*
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2253
2254
  	 * If this is an async queue and we have sync IO in flight, let it wait
  	 */
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
2255
  	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d617   Jens Axboe   cfq-iosched: abst...
2256
  		return false;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2257

abc3c744d   Shaohua Li   cfq-iosched: quan...
2258
  	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2259
2260
  	if (cfq_class_idle(cfqq))
  		max_dispatch = 1;
b4878f245   Jens Axboe   [PATCH] 02/05: up...
2261

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2262
2263
2264
2265
2266
2267
2268
  	/*
  	 * Does this cfqq already have too much IO in flight?
  	 */
  	if (cfqq->dispatched >= max_dispatch) {
  		/*
  		 * idle queue must always only have a single IO in flight
  		 */
3ed9a2965   Jens Axboe   cfq-iosched: impr...
2269
  		if (cfq_class_idle(cfqq))
0b182d617   Jens Axboe   cfq-iosched: abst...
2270
  			return false;
3ed9a2965   Jens Axboe   cfq-iosched: impr...
2271

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2272
2273
2274
  		/*
  		 * We have other queues, don't allow more IO from this one
  		 */
abc3c744d   Shaohua Li   cfq-iosched: quan...
2275
  		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
0b182d617   Jens Axboe   cfq-iosched: abst...
2276
  			return false;
9ede209e8   Jens Axboe   cfq-iosched: impr...
2277

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2278
  		/*
474b18ccc   Shaohua Li   cfq-iosched: no d...
2279
  		 * Sole queue user, no limit
365722bb9   Vivek Goyal   cfq-iosched: dela...
2280
  		 */
abc3c744d   Shaohua Li   cfq-iosched: quan...
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
  		if (cfqd->busy_queues == 1)
  			max_dispatch = -1;
  		else
  			/*
  			 * Normally we start throttling cfqq when cfq_quantum/2
  			 * requests have been dispatched. But we can drive
  			 * deeper queue depths at the beginning of slice
  			 * subjected to upper limit of cfq_quantum.
  			 * */
  			max_dispatch = cfqd->cfq_quantum;
8e2967555   Jens Axboe   cfq-iosched: impl...
2291
2292
2293
2294
2295
2296
2297
  	}
  
  	/*
  	 * Async queues must wait a bit before being allowed dispatch.
  	 * We also ramp up the dispatch depth gradually for async IO,
  	 * based on the last sync IO we serviced
  	 */
963b72fc6   Jens Axboe   cfq-iosched: rena...
2298
  	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
573412b29   Corrado Zoccolo   cfq-iosched: redu...
2299
  		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
8e2967555   Jens Axboe   cfq-iosched: impl...
2300
  		unsigned int depth;
365722bb9   Vivek Goyal   cfq-iosched: dela...
2301

61f0c1dca   Jens Axboe   cfq-iosched: use ...
2302
  		depth = last_sync / cfqd->cfq_slice[1];
e00c54c36   Jens Axboe   cfq-iosched: don'...
2303
2304
  		if (!depth && !cfqq->dispatched)
  			depth = 1;
8e2967555   Jens Axboe   cfq-iosched: impl...
2305
2306
  		if (depth < max_dispatch)
  			max_dispatch = depth;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2307
  	}
3ed9a2965   Jens Axboe   cfq-iosched: impr...
2308

0b182d617   Jens Axboe   cfq-iosched: abst...
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
  	/*
  	 * If we're below the current max, allow a dispatch
  	 */
  	return cfqq->dispatched < max_dispatch;
  }
  
  /*
   * Dispatch a request from cfqq, moving them to the request queue
   * dispatch list.
   */
  static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	struct request *rq;
  
  	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
  
  	if (!cfq_may_dispatch(cfqd, cfqq))
  		return false;
  
  	/*
  	 * follow expired path, else get first next available
  	 */
  	rq = cfq_check_fifo(cfqq);
  	if (!rq)
  		rq = cfqq->next_rq;
  
  	/*
  	 * insert request into driver dispatch list
  	 */
  	cfq_dispatch_insert(cfqd->queue, rq);
  
  	if (!cfqd->active_cic) {
  		struct cfq_io_context *cic = RQ_CIC(rq);
  
  		atomic_long_inc(&cic->ioc->refcount);
  		cfqd->active_cic = cic;
  	}
  
  	return true;
  }
  
  /*
   * Find the cfqq that we need to service and move a request from that to the
   * dispatch list
   */
  static int cfq_dispatch_requests(struct request_queue *q, int force)
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	struct cfq_queue *cfqq;
  
  	if (!cfqd->busy_queues)
  		return 0;
  
  	if (unlikely(force))
  		return cfq_forced_dispatch(cfqd);
  
  	cfqq = cfq_select_queue(cfqd);
  	if (!cfqq)
8e2967555   Jens Axboe   cfq-iosched: impl...
2367
  		return 0;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2368
  	/*
0b182d617   Jens Axboe   cfq-iosched: abst...
2369
  	 * Dispatch a request from this cfqq, if it is allowed
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2370
  	 */
0b182d617   Jens Axboe   cfq-iosched: abst...
2371
2372
  	if (!cfq_dispatch_request(cfqd, cfqq))
  		return 0;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2373
  	cfqq->slice_dispatch++;
b029195dd   Jens Axboe   cfq-iosched: don'...
2374
  	cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2375

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2376
2377
2378
2379
2380
2381
2382
2383
  	/*
  	 * expire an async queue immediately if it has used up its slice. idle
  	 * queue always expire after 1 dispatch round.
  	 */
  	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
  	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
  	    cfq_class_idle(cfqq))) {
  		cfqq->slice_end = jiffies + 1;
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2384
  		cfq_slice_expired(cfqd, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2385
  	}
b217a903a   Shan Wei   cfq: fix the log ...
2386
  	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2387
  	return 1;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2388
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2389
  /*
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
2390
2391
   * task holds one reference to the queue, dropped when task exits. each rq
   * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2392
   *
b1c357696   Vivek Goyal   blkio: Take care ...
2393
   * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2394
2395
2396
2397
   * queue lock must be held here.
   */
  static void cfq_put_queue(struct cfq_queue *cfqq)
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
2398
  	struct cfq_data *cfqd = cfqq->cfqd;
878eaddd0   Vivek Goyal   cfq-iosched: Do n...
2399
  	struct cfq_group *cfqg, *orig_cfqg;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2400
2401
  
  	BUG_ON(atomic_read(&cfqq->ref) <= 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2402
2403
2404
  
  	if (!atomic_dec_and_test(&cfqq->ref))
  		return;
7b679138b   Jens Axboe   cfq-iosched: add ...
2405
  	cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2406
  	BUG_ON(rb_first(&cfqq->sort_list));
22e2c507c   Jens Axboe   [PATCH] Update cf...
2407
  	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c357696   Vivek Goyal   blkio: Take care ...
2408
  	cfqg = cfqq->cfqg;
878eaddd0   Vivek Goyal   cfq-iosched: Do n...
2409
  	orig_cfqg = cfqq->orig_cfqg;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2410

28f95cbc3   Jens Axboe   cfq-iosched: remo...
2411
  	if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2412
  		__cfq_slice_expired(cfqd, cfqq, 0);
23e018a1b   Jens Axboe   block: get rid of...
2413
  		cfq_schedule_dispatch(cfqd);
28f95cbc3   Jens Axboe   cfq-iosched: remo...
2414
  	}
22e2c507c   Jens Axboe   [PATCH] Update cf...
2415

f04a64246   Vivek Goyal   blkio: Keep queue...
2416
  	BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2417
  	kmem_cache_free(cfq_pool, cfqq);
b1c357696   Vivek Goyal   blkio: Take care ...
2418
  	cfq_put_cfqg(cfqg);
878eaddd0   Vivek Goyal   cfq-iosched: Do n...
2419
2420
  	if (orig_cfqg)
  		cfq_put_cfqg(orig_cfqg);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2421
  }
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2422
2423
2424
  /*
   * Must always be called with the rcu_read_lock() held
   */
07416d29b   Jens Axboe   cfq-iosched: fix ...
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
  static void
  __call_for_each_cic(struct io_context *ioc,
  		    void (*func)(struct io_context *, struct cfq_io_context *))
  {
  	struct cfq_io_context *cic;
  	struct hlist_node *n;
  
  	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
  		func(ioc, cic);
  }
4ac845a2e   Jens Axboe   block: cfq: make ...
2435
  /*
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2436
   * Call func for each cic attached to this ioc.
4ac845a2e   Jens Axboe   block: cfq: make ...
2437
   */
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2438
  static void
4ac845a2e   Jens Axboe   block: cfq: make ...
2439
2440
  call_for_each_cic(struct io_context *ioc,
  		  void (*func)(struct io_context *, struct cfq_io_context *))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2441
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2442
  	rcu_read_lock();
07416d29b   Jens Axboe   cfq-iosched: fix ...
2443
  	__call_for_each_cic(ioc, func);
4ac845a2e   Jens Axboe   block: cfq: make ...
2444
  	rcu_read_unlock();
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2445
2446
2447
2448
2449
2450
2451
2452
2453
  }
  
  static void cfq_cic_free_rcu(struct rcu_head *head)
  {
  	struct cfq_io_context *cic;
  
  	cic = container_of(head, struct cfq_io_context, rcu_head);
  
  	kmem_cache_free(cfq_ioc_pool, cic);
245b2e70e   Tejun Heo   percpu: clean up ...
2454
  	elv_ioc_count_dec(cfq_ioc_count);
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2455

9a11b4ed0   Jens Axboe   cfq-iosched: prop...
2456
2457
2458
2459
2460
2461
2462
  	if (ioc_gone) {
  		/*
  		 * CFQ scheduler is exiting, grab exit lock and check
  		 * the pending io context count. If it hits zero,
  		 * complete ioc_gone and set it back to NULL
  		 */
  		spin_lock(&ioc_gone_lock);
245b2e70e   Tejun Heo   percpu: clean up ...
2463
  		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
9a11b4ed0   Jens Axboe   cfq-iosched: prop...
2464
2465
2466
2467
2468
  			complete(ioc_gone);
  			ioc_gone = NULL;
  		}
  		spin_unlock(&ioc_gone_lock);
  	}
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2469
  }
4ac845a2e   Jens Axboe   block: cfq: make ...
2470

34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2471
2472
2473
  static void cfq_cic_free(struct cfq_io_context *cic)
  {
  	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
4ac845a2e   Jens Axboe   block: cfq: make ...
2474
2475
2476
2477
2478
  }
  
  static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
  {
  	unsigned long flags;
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2479
  	unsigned long dead_key = (unsigned long) cic->key;
4ac845a2e   Jens Axboe   block: cfq: make ...
2480

bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2481
  	BUG_ON(!(dead_key & CIC_DEAD_KEY));
4ac845a2e   Jens Axboe   block: cfq: make ...
2482
2483
  
  	spin_lock_irqsave(&ioc->lock, flags);
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2484
  	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2485
  	hlist_del_rcu(&cic->cic_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2486
  	spin_unlock_irqrestore(&ioc->lock, flags);
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2487
  	cfq_cic_free(cic);
4ac845a2e   Jens Axboe   block: cfq: make ...
2488
  }
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2489
2490
2491
2492
2493
  /*
   * Must be called with rcu_read_lock() held or preemption otherwise disabled.
   * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
   * and ->trim() which is called with the task lock held
   */
4ac845a2e   Jens Axboe   block: cfq: make ...
2494
2495
  static void cfq_free_io_context(struct io_context *ioc)
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2496
  	/*
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2497
2498
2499
2500
  	 * ioc->refcount is zero here, or we are called from elv_unregister(),
  	 * so no more cic's are allowed to be linked into this ioc.  So it
  	 * should be ok to iterate over the known list, we will see all cic's
  	 * since no new ones are added.
4ac845a2e   Jens Axboe   block: cfq: make ...
2501
  	 */
07416d29b   Jens Axboe   cfq-iosched: fix ...
2502
  	__call_for_each_cic(ioc, cic_free_func);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2503
  }
d02a2c077   Shaohua Li   cfq-iosched: fix ...
2504
  static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2505
  {
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2506
  	struct cfq_queue *__cfqq, *next;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
  	/*
  	 * If this queue was scheduled to merge with another queue, be
  	 * sure to drop the reference taken on that queue (and others in
  	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
  	 */
  	__cfqq = cfqq->new_cfqq;
  	while (__cfqq) {
  		if (__cfqq == cfqq) {
  			WARN(1, "cfqq->new_cfqq loop detected
  ");
  			break;
  		}
  		next = __cfqq->new_cfqq;
  		cfq_put_queue(__cfqq);
  		__cfqq = next;
  	}
d02a2c077   Shaohua Li   cfq-iosched: fix ...
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
  }
  
  static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	if (unlikely(cfqq == cfqd->active_queue)) {
  		__cfq_slice_expired(cfqd, cfqq, 0);
  		cfq_schedule_dispatch(cfqd);
  	}
  
  	cfq_put_cooperator(cfqq);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2533

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2534
2535
  	cfq_put_queue(cfqq);
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
2536

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2537
2538
2539
  static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
  					 struct cfq_io_context *cic)
  {
4faa3c815   Fabio Checconi   cfq-iosched: do n...
2540
  	struct io_context *ioc = cic->ioc;
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2541
  	list_del_init(&cic->queue_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2542
2543
  
  	/*
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2544
  	 * Make sure dead mark is seen for dead queues
4ac845a2e   Jens Axboe   block: cfq: make ...
2545
  	 */
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2546
  	smp_wmb();
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2547
  	cic->key = cfqd_dead_key(cfqd);
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2548

4faa3c815   Fabio Checconi   cfq-iosched: do n...
2549
2550
  	if (ioc->ioc_data == cic)
  		rcu_assign_pointer(ioc->ioc_data, NULL);
ff6657c6c   Jens Axboe   cfq-iosched: get ...
2551
2552
2553
  	if (cic->cfqq[BLK_RW_ASYNC]) {
  		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
  		cic->cfqq[BLK_RW_ASYNC] = NULL;
12a057321   Al Viro   [PATCH] keep sync...
2554
  	}
ff6657c6c   Jens Axboe   cfq-iosched: get ...
2555
2556
2557
  	if (cic->cfqq[BLK_RW_SYNC]) {
  		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
  		cic->cfqq[BLK_RW_SYNC] = NULL;
12a057321   Al Viro   [PATCH] keep sync...
2558
  	}
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2559
  }
4ac845a2e   Jens Axboe   block: cfq: make ...
2560
2561
  static void cfq_exit_single_io_context(struct io_context *ioc,
  				       struct cfq_io_context *cic)
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2562
  {
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2563
  	struct cfq_data *cfqd = cic_to_cfqd(cic);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2564

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2565
  	if (cfqd) {
165125e1e   Jens Axboe   [BLOCK] Get rid o...
2566
  		struct request_queue *q = cfqd->queue;
4ac845a2e   Jens Axboe   block: cfq: make ...
2567
  		unsigned long flags;
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2568

4ac845a2e   Jens Axboe   block: cfq: make ...
2569
  		spin_lock_irqsave(q->queue_lock, flags);
62c1fe9d9   Jens Axboe   cfq-iosched: fix ...
2570
2571
2572
2573
2574
2575
  
  		/*
  		 * Ensure we get a fresh copy of the ->key to prevent
  		 * race between exiting task and queue
  		 */
  		smp_read_barrier_depends();
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2576
  		if (cic->key == cfqd)
62c1fe9d9   Jens Axboe   cfq-iosched: fix ...
2577
  			__cfq_exit_single_io_context(cfqd, cic);
4ac845a2e   Jens Axboe   block: cfq: make ...
2578
  		spin_unlock_irqrestore(q->queue_lock, flags);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2579
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2580
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2581
2582
2583
2584
  /*
   * The process that ioc belongs to has exited, we need to clean up
   * and put the internal structures we have that belongs to that process.
   */
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2585
  static void cfq_exit_io_context(struct io_context *ioc)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2586
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2587
  	call_for_each_cic(ioc, cfq_exit_single_io_context);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2588
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
2589
  static struct cfq_io_context *
8267e268e   Al Viro   [PATCH] gfp_t: bl...
2590
  cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2591
  {
b5deef901   Jens Axboe   [PATCH] Make sure...
2592
  	struct cfq_io_context *cic;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2593

94f6030ca   Christoph Lameter   Slab allocators: ...
2594
2595
  	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
  							cfqd->queue->node);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2596
  	if (cic) {
22e2c507c   Jens Axboe   [PATCH] Update cf...
2597
  		cic->last_end_request = jiffies;
553698f94   Jens Axboe   [PATCH] cfq-iosch...
2598
  		INIT_LIST_HEAD(&cic->queue_list);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2599
  		INIT_HLIST_NODE(&cic->cic_list);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2600
2601
  		cic->dtor = cfq_free_io_context;
  		cic->exit = cfq_exit_io_context;
245b2e70e   Tejun Heo   percpu: clean up ...
2602
  		elv_ioc_count_inc(cfq_ioc_count);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2603
2604
2605
2606
  	}
  
  	return cic;
  }
fd0928df9   Jens Axboe   ioprio: move io p...
2607
  static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2608
2609
2610
  {
  	struct task_struct *tsk = current;
  	int ioprio_class;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2611
  	if (!cfq_cfqq_prio_changed(cfqq))
22e2c507c   Jens Axboe   [PATCH] Update cf...
2612
  		return;
fd0928df9   Jens Axboe   ioprio: move io p...
2613
  	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2614
  	switch (ioprio_class) {
fe094d98e   Jens Axboe   cfq-iosched: make...
2615
2616
2617
2618
2619
  	default:
  		printk(KERN_ERR "cfq: bad prio %x
  ", ioprio_class);
  	case IOPRIO_CLASS_NONE:
  		/*
6d63c2755   Jens Axboe   cfq-iosched: make...
2620
  		 * no prio set, inherit CPU scheduling settings
fe094d98e   Jens Axboe   cfq-iosched: make...
2621
2622
  		 */
  		cfqq->ioprio = task_nice_ioprio(tsk);
6d63c2755   Jens Axboe   cfq-iosched: make...
2623
  		cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98e   Jens Axboe   cfq-iosched: make...
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
  		break;
  	case IOPRIO_CLASS_RT:
  		cfqq->ioprio = task_ioprio(ioc);
  		cfqq->ioprio_class = IOPRIO_CLASS_RT;
  		break;
  	case IOPRIO_CLASS_BE:
  		cfqq->ioprio = task_ioprio(ioc);
  		cfqq->ioprio_class = IOPRIO_CLASS_BE;
  		break;
  	case IOPRIO_CLASS_IDLE:
  		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
  		cfqq->ioprio = 7;
  		cfq_clear_cfqq_idle_window(cfqq);
  		break;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2638
2639
2640
2641
2642
2643
2644
2645
  	}
  
  	/*
  	 * keep track of original prio settings in case we have to temporarily
  	 * elevate the priority of this queue
  	 */
  	cfqq->org_ioprio = cfqq->ioprio;
  	cfqq->org_ioprio_class = cfqq->ioprio_class;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2646
  	cfq_clear_cfqq_prio_changed(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2647
  }
febffd618   Jens Axboe   cfq-iosched: kill...
2648
  static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2649
  {
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2650
  	struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0e   Al Viro   [PATCH] switch to...
2651
  	struct cfq_queue *cfqq;
c1b707d25   Jens Axboe   [PATCH] CFQ: bad ...
2652
  	unsigned long flags;
35e6077cb   Jens Axboe   [PATCH] cfq-iosch...
2653

caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2654
2655
  	if (unlikely(!cfqd))
  		return;
c1b707d25   Jens Axboe   [PATCH] CFQ: bad ...
2656
  	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2657

ff6657c6c   Jens Axboe   cfq-iosched: get ...
2658
  	cfqq = cic->cfqq[BLK_RW_ASYNC];
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2659
2660
  	if (cfqq) {
  		struct cfq_queue *new_cfqq;
ff6657c6c   Jens Axboe   cfq-iosched: get ...
2661
2662
  		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
  						GFP_ATOMIC);
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2663
  		if (new_cfqq) {
ff6657c6c   Jens Axboe   cfq-iosched: get ...
2664
  			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2665
2666
  			cfq_put_queue(cfqq);
  		}
22e2c507c   Jens Axboe   [PATCH] Update cf...
2667
  	}
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2668

ff6657c6c   Jens Axboe   cfq-iosched: get ...
2669
  	cfqq = cic->cfqq[BLK_RW_SYNC];
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2670
2671
  	if (cfqq)
  		cfq_mark_cfqq_prio_changed(cfqq);
c1b707d25   Jens Axboe   [PATCH] CFQ: bad ...
2672
  	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2673
  }
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2674
  static void cfq_ioc_set_ioprio(struct io_context *ioc)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2675
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2676
  	call_for_each_cic(ioc, changed_ioprio);
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2677
  	ioc->ioprio_changed = 0;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2678
  }
d5036d770   Jens Axboe   cfq-iosched: move...
2679
  static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a5   Jens Axboe   cfq-iosched: appl...
2680
  			  pid_t pid, bool is_sync)
d5036d770   Jens Axboe   cfq-iosched: move...
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
  {
  	RB_CLEAR_NODE(&cfqq->rb_node);
  	RB_CLEAR_NODE(&cfqq->p_node);
  	INIT_LIST_HEAD(&cfqq->fifo);
  
  	atomic_set(&cfqq->ref, 0);
  	cfqq->cfqd = cfqd;
  
  	cfq_mark_cfqq_prio_changed(cfqq);
  
  	if (is_sync) {
  		if (!cfq_class_idle(cfqq))
  			cfq_mark_cfqq_idle_window(cfqq);
  		cfq_mark_cfqq_sync(cfqq);
  	}
  	cfqq->pid = pid;
  }
24610333d   Vivek Goyal   blkio: Drop the r...
2698
2699
2700
2701
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
  {
  	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2702
  	struct cfq_data *cfqd = cic_to_cfqd(cic);
24610333d   Vivek Goyal   blkio: Drop the r...
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
  	unsigned long flags;
  	struct request_queue *q;
  
  	if (unlikely(!cfqd))
  		return;
  
  	q = cfqd->queue;
  
  	spin_lock_irqsave(q->queue_lock, flags);
  
  	if (sync_cfqq) {
  		/*
  		 * Drop reference to sync queue. A new sync queue will be
  		 * assigned in new group upon arrival of a fresh request.
  		 */
  		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
  		cic_set_cfqq(cic, NULL, 1);
  		cfq_put_queue(sync_cfqq);
  	}
  
  	spin_unlock_irqrestore(q->queue_lock, flags);
  }
  
  static void cfq_ioc_set_cgroup(struct io_context *ioc)
  {
  	call_for_each_cic(ioc, changed_cgroup);
  	ioc->cgroup_changed = 0;
  }
  #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
22e2c507c   Jens Axboe   [PATCH] Update cf...
2732
  static struct cfq_queue *
a6151c3a5   Jens Axboe   cfq-iosched: appl...
2733
  cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
fd0928df9   Jens Axboe   ioprio: move io p...
2734
  		     struct io_context *ioc, gfp_t gfp_mask)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2735
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
2736
  	struct cfq_queue *cfqq, *new_cfqq = NULL;
91fac317a   Vasily Tarasov   cfq-iosched: get ...
2737
  	struct cfq_io_context *cic;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2738
  	struct cfq_group *cfqg;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2739
2740
  
  retry:
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2741
  	cfqg = cfq_get_cfqg(cfqd, 1);
4ac845a2e   Jens Axboe   block: cfq: make ...
2742
  	cic = cfq_cic_lookup(cfqd, ioc);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
2743
2744
  	/* cic always exists here */
  	cfqq = cic_to_cfqq(cic, is_sync);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2745

6118b70b3   Jens Axboe   cfq-iosched: get ...
2746
2747
2748
2749
2750
2751
  	/*
  	 * Always try a new alloc if we fell back to the OOM cfqq
  	 * originally, since it should just be a temporary situation.
  	 */
  	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
  		cfqq = NULL;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2752
2753
2754
2755
2756
  		if (new_cfqq) {
  			cfqq = new_cfqq;
  			new_cfqq = NULL;
  		} else if (gfp_mask & __GFP_WAIT) {
  			spin_unlock_irq(cfqd->queue->queue_lock);
94f6030ca   Christoph Lameter   Slab allocators: ...
2757
  			new_cfqq = kmem_cache_alloc_node(cfq_pool,
6118b70b3   Jens Axboe   cfq-iosched: get ...
2758
  					gfp_mask | __GFP_ZERO,
94f6030ca   Christoph Lameter   Slab allocators: ...
2759
  					cfqd->queue->node);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2760
  			spin_lock_irq(cfqd->queue->queue_lock);
6118b70b3   Jens Axboe   cfq-iosched: get ...
2761
2762
  			if (new_cfqq)
  				goto retry;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2763
  		} else {
94f6030ca   Christoph Lameter   Slab allocators: ...
2764
2765
2766
  			cfqq = kmem_cache_alloc_node(cfq_pool,
  					gfp_mask | __GFP_ZERO,
  					cfqd->queue->node);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2767
  		}
6118b70b3   Jens Axboe   cfq-iosched: get ...
2768
2769
2770
  		if (cfqq) {
  			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
  			cfq_init_prio_data(cfqq, ioc);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2771
  			cfq_link_cfqq_cfqg(cfqq, cfqg);
6118b70b3   Jens Axboe   cfq-iosched: get ...
2772
2773
2774
  			cfq_log_cfqq(cfqd, cfqq, "alloced");
  		} else
  			cfqq = &cfqd->oom_cfqq;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2775
2776
2777
2778
  	}
  
  	if (new_cfqq)
  		kmem_cache_free(cfq_pool, new_cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2779
2780
  	return cfqq;
  }
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2781
2782
2783
  static struct cfq_queue **
  cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
  {
fe094d98e   Jens Axboe   cfq-iosched: make...
2784
  	switch (ioprio_class) {
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
  	case IOPRIO_CLASS_RT:
  		return &cfqd->async_cfqq[0][ioprio];
  	case IOPRIO_CLASS_BE:
  		return &cfqd->async_cfqq[1][ioprio];
  	case IOPRIO_CLASS_IDLE:
  		return &cfqd->async_idle_cfqq;
  	default:
  		BUG();
  	}
  }
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2795
  static struct cfq_queue *
a6151c3a5   Jens Axboe   cfq-iosched: appl...
2796
  cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2797
2798
  	      gfp_t gfp_mask)
  {
fd0928df9   Jens Axboe   ioprio: move io p...
2799
2800
  	const int ioprio = task_ioprio(ioc);
  	const int ioprio_class = task_ioprio_class(ioc);
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2801
  	struct cfq_queue **async_cfqq = NULL;
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2802
  	struct cfq_queue *cfqq = NULL;
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2803
2804
2805
2806
  	if (!is_sync) {
  		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
  		cfqq = *async_cfqq;
  	}
6118b70b3   Jens Axboe   cfq-iosched: get ...
2807
  	if (!cfqq)
fd0928df9   Jens Axboe   ioprio: move io p...
2808
  		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2809
2810
2811
2812
  
  	/*
  	 * pin the queue now that it's allocated, scheduler exit will prune it
  	 */
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2813
  	if (!is_sync && !(*async_cfqq)) {
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2814
  		atomic_inc(&cfqq->ref);
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2815
  		*async_cfqq = cfqq;
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2816
2817
2818
2819
2820
  	}
  
  	atomic_inc(&cfqq->ref);
  	return cfqq;
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2821
2822
2823
  /*
   * We drop cfq io contexts lazily, so we may find a dead one.
   */
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2824
  static void
4ac845a2e   Jens Axboe   block: cfq: make ...
2825
2826
  cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
  		  struct cfq_io_context *cic)
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2827
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2828
  	unsigned long flags;
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2829
  	WARN_ON(!list_empty(&cic->queue_list));
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2830
  	BUG_ON(cic->key != cfqd_dead_key(cfqd));
597bc485d   Jens Axboe   cfq-iosched: spee...
2831

4ac845a2e   Jens Axboe   block: cfq: make ...
2832
  	spin_lock_irqsave(&ioc->lock, flags);
4faa3c815   Fabio Checconi   cfq-iosched: do n...
2833
  	BUG_ON(ioc->ioc_data == cic);
597bc485d   Jens Axboe   cfq-iosched: spee...
2834

80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2835
  	radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2836
  	hlist_del_rcu(&cic->cic_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2837
2838
2839
  	spin_unlock_irqrestore(&ioc->lock, flags);
  
  	cfq_cic_free(cic);
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2840
  }
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2841
  static struct cfq_io_context *
4ac845a2e   Jens Axboe   block: cfq: make ...
2842
  cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2843
  {
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2844
  	struct cfq_io_context *cic;
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2845
  	unsigned long flags;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2846

91fac317a   Vasily Tarasov   cfq-iosched: get ...
2847
2848
  	if (unlikely(!ioc))
  		return NULL;
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2849
  	rcu_read_lock();
597bc485d   Jens Axboe   cfq-iosched: spee...
2850
2851
2852
  	/*
  	 * we maintain a last-hit cache, to avoid browsing over the tree
  	 */
4ac845a2e   Jens Axboe   block: cfq: make ...
2853
  	cic = rcu_dereference(ioc->ioc_data);
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2854
2855
  	if (cic && cic->key == cfqd) {
  		rcu_read_unlock();
597bc485d   Jens Axboe   cfq-iosched: spee...
2856
  		return cic;
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2857
  	}
597bc485d   Jens Axboe   cfq-iosched: spee...
2858

4ac845a2e   Jens Axboe   block: cfq: make ...
2859
  	do {
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2860
  		cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
4ac845a2e   Jens Axboe   block: cfq: make ...
2861
2862
2863
  		rcu_read_unlock();
  		if (!cic)
  			break;
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2864
  		if (unlikely(cic->key != cfqd)) {
4ac845a2e   Jens Axboe   block: cfq: make ...
2865
  			cfq_drop_dead_cic(cfqd, ioc, cic);
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2866
  			rcu_read_lock();
4ac845a2e   Jens Axboe   block: cfq: make ...
2867
  			continue;
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2868
  		}
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2869

d6de8be71   Jens Axboe   cfq-iosched: fix ...
2870
  		spin_lock_irqsave(&ioc->lock, flags);
4ac845a2e   Jens Axboe   block: cfq: make ...
2871
  		rcu_assign_pointer(ioc->ioc_data, cic);
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2872
  		spin_unlock_irqrestore(&ioc->lock, flags);
4ac845a2e   Jens Axboe   block: cfq: make ...
2873
2874
  		break;
  	} while (1);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2875

4ac845a2e   Jens Axboe   block: cfq: make ...
2876
  	return cic;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2877
  }
4ac845a2e   Jens Axboe   block: cfq: make ...
2878
2879
2880
2881
2882
  /*
   * Add cic into ioc, using cfqd as the search key. This enables us to lookup
   * the process specific cfq io context when entered from the block layer.
   * Also adds the cic to a per-cfqd list, used when this queue is removed.
   */
febffd618   Jens Axboe   cfq-iosched: kill...
2883
2884
  static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
  			struct cfq_io_context *cic, gfp_t gfp_mask)
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2885
  {
0261d6886   Jens Axboe   [PATCH] CFQ: use ...
2886
  	unsigned long flags;
4ac845a2e   Jens Axboe   block: cfq: make ...
2887
  	int ret;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2888

4ac845a2e   Jens Axboe   block: cfq: make ...
2889
2890
2891
2892
  	ret = radix_tree_preload(gfp_mask);
  	if (!ret) {
  		cic->ioc = ioc;
  		cic->key = cfqd;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2893

4ac845a2e   Jens Axboe   block: cfq: make ...
2894
2895
  		spin_lock_irqsave(&ioc->lock, flags);
  		ret = radix_tree_insert(&ioc->radix_root,
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2896
  						cfqd->cic_index, cic);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2897
2898
  		if (!ret)
  			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2899
  		spin_unlock_irqrestore(&ioc->lock, flags);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2900

4ac845a2e   Jens Axboe   block: cfq: make ...
2901
2902
2903
2904
2905
2906
2907
  		radix_tree_preload_end();
  
  		if (!ret) {
  			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  			list_add(&cic->queue_list, &cfqd->cic_list);
  			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  		}
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2908
  	}
4ac845a2e   Jens Axboe   block: cfq: make ...
2909
2910
2911
  	if (ret)
  		printk(KERN_ERR "cfq: cic link failed!
  ");
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2912

4ac845a2e   Jens Axboe   block: cfq: make ...
2913
  	return ret;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2914
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2915
2916
2917
  /*
   * Setup general io context and cfq io context. There can be several cfq
   * io contexts per general io context, if this process is doing io to more
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2918
   * than one device managed by cfq.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2919
2920
   */
  static struct cfq_io_context *
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2921
  cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2922
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
2923
  	struct io_context *ioc = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2924
  	struct cfq_io_context *cic;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2925

22e2c507c   Jens Axboe   [PATCH] Update cf...
2926
  	might_sleep_if(gfp_mask & __GFP_WAIT);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2927

b5deef901   Jens Axboe   [PATCH] Make sure...
2928
  	ioc = get_io_context(gfp_mask, cfqd->queue->node);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2929
2930
  	if (!ioc)
  		return NULL;
4ac845a2e   Jens Axboe   block: cfq: make ...
2931
  	cic = cfq_cic_lookup(cfqd, ioc);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2932
2933
  	if (cic)
  		goto out;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2934

e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2935
2936
2937
  	cic = cfq_alloc_io_context(cfqd, gfp_mask);
  	if (cic == NULL)
  		goto err;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2938

4ac845a2e   Jens Axboe   block: cfq: make ...
2939
2940
  	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
  		goto err_free;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2941
  out:
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2942
2943
2944
  	smp_read_barrier_depends();
  	if (unlikely(ioc->ioprio_changed))
  		cfq_ioc_set_ioprio(ioc);
24610333d   Vivek Goyal   blkio: Drop the r...
2945
2946
2947
2948
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	if (unlikely(ioc->cgroup_changed))
  		cfq_ioc_set_cgroup(ioc);
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2949
  	return cic;
4ac845a2e   Jens Axboe   block: cfq: make ...
2950
2951
  err_free:
  	cfq_cic_free(cic);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2952
2953
2954
2955
  err:
  	put_io_context(ioc);
  	return NULL;
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
2956
2957
  static void
  cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2958
  {
aaf1228dd   Jens Axboe   cfq-iosched: remo...
2959
2960
  	unsigned long elapsed = jiffies - cic->last_end_request;
  	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
db3b5848e   Kiyoshi Ueda   When cfq I/O sche...
2961

22e2c507c   Jens Axboe   [PATCH] Update cf...
2962
2963
2964
2965
  	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
  	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
  	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2966

206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
2967
  static void
b2c18e1e0   Jeff Moyer   cfq: calculate th...
2968
  cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f531   Jens Axboe   cfq-iosched: deve...
2969
  		       struct request *rq)
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
2970
  {
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
2971
  	sector_t sdist = 0;
41647e7a9   Corrado Zoccolo   cfq-iosched: reth...
2972
  	sector_t n_sec = blk_rq_sectors(rq);
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
2973
2974
2975
2976
2977
2978
  	if (cfqq->last_request_pos) {
  		if (cfqq->last_request_pos < blk_rq_pos(rq))
  			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
  		else
  			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
  	}
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
2979

3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
2980
  	cfqq->seek_history <<= 1;
41647e7a9   Corrado Zoccolo   cfq-iosched: reth...
2981
2982
2983
2984
  	if (blk_queue_nonrot(cfqd->queue))
  		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
  	else
  		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
2985
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2986

22e2c507c   Jens Axboe   [PATCH] Update cf...
2987
2988
2989
2990
2991
2992
2993
2994
  /*
   * Disable idle window if the process thinks too long or seeks so much that
   * it doesn't matter
   */
  static void
  cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  		       struct cfq_io_context *cic)
  {
7b679138b   Jens Axboe   cfq-iosched: add ...
2995
  	int old_idle, enable_idle;
1be92f2fc   Jens Axboe   cfq-iosched: neve...
2996

0871714e0   Jens Axboe   cfq-iosched: rela...
2997
2998
2999
3000
  	/*
  	 * Don't idle for async or idle io prio class
  	 */
  	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2fc   Jens Axboe   cfq-iosched: neve...
3001
  		return;
c265a7f41   Jens Axboe   cfq-iosched: get ...
3002
  	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3003

76280aff1   Corrado Zoccolo   cfq-iosched: idli...
3004
3005
  	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
  		cfq_mark_cfqq_deep(cfqq);
749ef9f84   Corrado Zoccolo   cfq: improve fsyn...
3006
3007
3008
  	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
  		enable_idle = 0;
  	else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
3009
  	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507c   Jens Axboe   [PATCH] Update cf...
3010
3011
  		enable_idle = 0;
  	else if (sample_valid(cic->ttime_samples)) {
718eee057   Corrado Zoccolo   cfq-iosched: fair...
3012
  		if (cic->ttime_mean > cfqd->cfq_slice_idle)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3013
3014
3015
  			enable_idle = 0;
  		else
  			enable_idle = 1;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3016
  	}
7b679138b   Jens Axboe   cfq-iosched: add ...
3017
3018
3019
3020
3021
3022
3023
  	if (old_idle != enable_idle) {
  		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
  		if (enable_idle)
  			cfq_mark_cfqq_idle_window(cfqq);
  		else
  			cfq_clear_cfqq_idle_window(cfqq);
  	}
22e2c507c   Jens Axboe   [PATCH] Update cf...
3024
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3025

22e2c507c   Jens Axboe   [PATCH] Update cf...
3026
3027
3028
3029
  /*
   * Check if new_cfqq should preempt the currently active queue. Return 0 for
   * no or if we aren't sure, a 1 will cause a preempt.
   */
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3030
  static bool
22e2c507c   Jens Axboe   [PATCH] Update cf...
3031
  cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3032
  		   struct request *rq)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3033
  {
6d048f531   Jens Axboe   cfq-iosched: deve...
3034
  	struct cfq_queue *cfqq;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3035

6d048f531   Jens Axboe   cfq-iosched: deve...
3036
3037
  	cfqq = cfqd->active_queue;
  	if (!cfqq)
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3038
  		return false;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3039

6d048f531   Jens Axboe   cfq-iosched: deve...
3040
  	if (cfq_class_idle(new_cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3041
  		return false;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3042
3043
  
  	if (cfq_class_idle(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3044
  		return true;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3045

22e2c507c   Jens Axboe   [PATCH] Update cf...
3046
  	/*
875feb63b   Divyesh Shah   cfq-iosched: Resp...
3047
3048
3049
3050
3051
3052
  	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
  	 */
  	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
  		return false;
  
  	/*
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
3053
3054
3055
  	 * if the new request is sync, but the currently running queue is
  	 * not, let the sync request have priority.
  	 */
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3056
  	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3057
  		return true;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3058

8682e1f15   Vivek Goyal   blkio: Provide so...
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
  	if (new_cfqq->cfqg != cfqq->cfqg)
  		return false;
  
  	if (cfq_slice_used(cfqq))
  		return true;
  
  	/* Allow preemption only if we are idling on sync-noidle tree */
  	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
  	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
  	    new_cfqq->service_tree->count == 2 &&
  	    RB_EMPTY_ROOT(&cfqq->sort_list))
  		return true;
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
3071
3072
3073
3074
  	/*
  	 * So both queues are sync. Let the new request get disk time if
  	 * it's a metadata request and the current queue is doing regular IO.
  	 */
7b6d91dae   Christoph Hellwig   block: unify flag...
3075
  	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
e6ec4fe24   Jens Axboe   cfq-iosched: fix ...
3076
  		return true;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3077

3a9a3f6cc   Divyesh Shah   cfq-iosched: Allo...
3078
3079
3080
3081
  	/*
  	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
  	 */
  	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3082
  		return true;
3a9a3f6cc   Divyesh Shah   cfq-iosched: Allo...
3083

1e3335de0   Jens Axboe   cfq-iosched: impr...
3084
  	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3085
  		return false;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3086
3087
3088
3089
3090
  
  	/*
  	 * if this request is as-good as one we would expect from the
  	 * current cfqq, let it preempt
  	 */
e9ce335df   Shaohua Li   cfq-iosched: fix ...
3091
  	if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3092
  		return true;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3093

a6151c3a5   Jens Axboe   cfq-iosched: appl...
3094
  	return false;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3095
3096
3097
3098
3099
3100
3101
3102
  }
  
  /*
   * cfqq preempts the active queue. if we allowed preempt with no slice left,
   * let it have half of its nominal slice.
   */
  static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
7b679138b   Jens Axboe   cfq-iosched: add ...
3103
  	cfq_log_cfqq(cfqd, cfqq, "preempt");
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
3104
  	cfq_slice_expired(cfqd, 1);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3105

bf5722567   Jens Axboe   [PATCH] cfq-iosch...
3106
3107
3108
3109
3110
  	/*
  	 * Put the new queue at the front of the of the current list,
  	 * so we know that it will be selected next.
  	 */
  	BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd9   Jens Axboe   cfq-iosched: get ...
3111
3112
  
  	cfq_service_tree_add(cfqd, cfqq, 1);
bf5722567   Jens Axboe   [PATCH] cfq-iosch...
3113

44f7c1606   Jens Axboe   cfq-iosched: defe...
3114
3115
  	cfqq->slice_end = 0;
  	cfq_mark_cfqq_slice_new(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3116
3117
3118
  }
  
  /*
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3119
   * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507c   Jens Axboe   [PATCH] Update cf...
3120
3121
3122
   * something we should do about it
   */
  static void
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3123
3124
  cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  		struct request *rq)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3125
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3126
  	struct cfq_io_context *cic = RQ_CIC(rq);
12e9fddd6   Jens Axboe   [PATCH] cfq-iosch...
3127

45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3128
  	cfqd->rq_queued++;
7b6d91dae   Christoph Hellwig   block: unify flag...
3129
  	if (rq->cmd_flags & REQ_META)
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
3130
  		cfqq->meta_pending++;
9c2c38a12   Jens Axboe   [PATCH] cfq-iosch...
3131
  	cfq_update_io_thinktime(cfqd, cic);
b2c18e1e0   Jeff Moyer   cfq: calculate th...
3132
  	cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a12   Jens Axboe   [PATCH] cfq-iosch...
3133
  	cfq_update_idle_window(cfqd, cfqq, cic);
b2c18e1e0   Jeff Moyer   cfq: calculate th...
3134
  	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3135
3136
3137
  
  	if (cfqq == cfqd->active_queue) {
  		/*
b029195dd   Jens Axboe   cfq-iosched: don'...
3138
3139
3140
  		 * Remember that we saw a request from this process, but
  		 * don't start queuing just yet. Otherwise we risk seeing lots
  		 * of tiny requests, because we disrupt the normal plugging
d6ceb25e8   Jens Axboe   cfq-iosched: don'...
3141
3142
  		 * and merging. If the request is already larger than a single
  		 * page, let it rip immediately. For that case we assume that
2d8707229   Jens Axboe   cfq-iosched: twea...
3143
3144
3145
  		 * merging is already done. Ditto for a busy system that
  		 * has other work pending, don't risk delaying until the
  		 * idle timer unplug to continue working.
22e2c507c   Jens Axboe   [PATCH] Update cf...
3146
  		 */
d6ceb25e8   Jens Axboe   cfq-iosched: don'...
3147
  		if (cfq_cfqq_wait_request(cfqq)) {
2d8707229   Jens Axboe   cfq-iosched: twea...
3148
3149
  			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
  			    cfqd->busy_queues > 1) {
812df48d1   Divyesh Shah   blkio: Add more d...
3150
  				cfq_del_timer(cfqd, cfqq);
554554f60   Gui Jianfeng   cfq: Remove wait_...
3151
  				cfq_clear_cfqq_wait_request(cfqq);
bf7919371   Vivek Goyal   blkio: Set must_d...
3152
  				__blk_run_queue(cfqd->queue);
a11cdaa7a   Divyesh Shah   block: Update to ...
3153
  			} else {
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
3154
  				cfq_blkiocg_update_idle_time_stats(
a11cdaa7a   Divyesh Shah   block: Update to ...
3155
  						&cfqq->cfqg->blkg);
bf7919371   Vivek Goyal   blkio: Set must_d...
3156
  				cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7a   Divyesh Shah   block: Update to ...
3157
  			}
d6ceb25e8   Jens Axboe   cfq-iosched: don'...
3158
  		}
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3159
  	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3160
3161
3162
  		/*
  		 * not the active queue - expire current slice if it is
  		 * idle and has expired it's mean thinktime or this new queue
3a9a3f6cc   Divyesh Shah   cfq-iosched: Allo...
3163
3164
  		 * has some old slice time left and is of higher priority or
  		 * this new queue is RT and the current one is BE
22e2c507c   Jens Axboe   [PATCH] Update cf...
3165
3166
  		 */
  		cfq_preempt_queue(cfqd, cfqq);
a7f557923   Tejun Heo   block: kill blk_s...
3167
  		__blk_run_queue(cfqd->queue);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3168
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3169
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3170
  static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3171
  {
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3172
  	struct cfq_data *cfqd = q->elevator->elevator_data;
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3173
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3174

7b679138b   Jens Axboe   cfq-iosched: add ...
3175
  	cfq_log_cfqq(cfqd, cfqq, "insert_request");
fd0928df9   Jens Axboe   ioprio: move io p...
3176
  	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3177

30996f40b   Jens Axboe   cfq-iosched: fix ...
3178
  	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3179
  	list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
3180
  	cfq_add_rq_rb(rq);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
3181
  	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
cdc1184cf   Divyesh Shah   blkio: Add io_que...
3182
3183
  			&cfqd->serving_group->blkg, rq_data_dir(rq),
  			rq_is_sync(rq));
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3184
  	cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3185
  }
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3186
3187
3188
3189
3190
3191
  /*
   * Update hw_tag based on peak queue depth over 50 samples under
   * sufficient load.
   */
  static void cfq_update_hw_tag(struct cfq_data *cfqd)
  {
1a1238a7d   Shaohua Li   cfq-iosched: impr...
3192
  	struct cfq_queue *cfqq = cfqd->active_queue;
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3193
3194
  	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
  		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08f   Corrado Zoccolo   cfq-iosched: fix ...
3195
3196
3197
  
  	if (cfqd->hw_tag == 1)
  		return;
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3198
3199
  
  	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3200
  	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3201
  		return;
1a1238a7d   Shaohua Li   cfq-iosched: impr...
3202
3203
3204
3205
3206
3207
3208
  	/*
  	 * If active queue hasn't enough requests and can idle, cfq might not
  	 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
  	 * case
  	 */
  	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
  	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3209
  	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7d   Shaohua Li   cfq-iosched: impr...
3210
  		return;
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3211
3212
  	if (cfqd->hw_tag_samples++ < 50)
  		return;
e459dd08f   Corrado Zoccolo   cfq-iosched: fix ...
3213
  	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3214
3215
3216
  		cfqd->hw_tag = 1;
  	else
  		cfqd->hw_tag = 0;
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3217
  }
7667aa063   Vivek Goyal   cfq-iosched: Take...
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
  static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	struct cfq_io_context *cic = cfqd->active_cic;
  
  	/* If there are other queues in the group, don't wait */
  	if (cfqq->cfqg->nr_cfqq > 1)
  		return false;
  
  	if (cfq_slice_used(cfqq))
  		return true;
  
  	/* if slice left is less than think time, wait busy */
  	if (cic && sample_valid(cic->ttime_samples)
  	    && (cfqq->slice_end - jiffies < cic->ttime_mean))
  		return true;
  
  	/*
  	 * If think times is less than a jiffy than ttime_mean=0 and above
  	 * will not be true. It might happen that slice has not expired yet
  	 * but will expire soon (4-5 ns) during select_queue(). To cover the
  	 * case where think time is less than a jiffy, mark the queue wait
  	 * busy if only 1 jiffy is left in the slice.
  	 */
  	if (cfqq->slice_end - jiffies == 1)
  		return true;
  
  	return false;
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3246
  static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3247
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3248
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3249
  	struct cfq_data *cfqd = cfqq->cfqd;
5380a101d   Jens Axboe   [PATCH] cfq-iosch...
3250
  	const int sync = rq_is_sync(rq);
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3251
  	unsigned long now;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3252

b4878f245   Jens Axboe   [PATCH] 02/05: up...
3253
  	now = jiffies;
33659ebba   Christoph Hellwig   block: remove wra...
3254
3255
  	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
  		     !!(rq->cmd_flags & REQ_NOIDLE));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3256

45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3257
  	cfq_update_hw_tag(cfqd);
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3258
  	WARN_ON(!cfqd->rq_in_driver);
6d048f531   Jens Axboe   cfq-iosched: deve...
3259
  	WARN_ON(!cfqq->dispatched);
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3260
  	cfqd->rq_in_driver--;
6d048f531   Jens Axboe   cfq-iosched: deve...
3261
  	cfqq->dispatched--;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3262
  	(RQ_CFQG(rq))->dispatched--;
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
3263
3264
3265
  	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
  			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
  			rq_data_dir(rq), rq_is_sync(rq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3266

53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3267
  	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a2965   Jens Axboe   cfq-iosched: impr...
3268

365722bb9   Vivek Goyal   cfq-iosched: dela...
3269
  	if (sync) {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3270
  		RQ_CIC(rq)->last_end_request = now;
573412b29   Corrado Zoccolo   cfq-iosched: redu...
3271
3272
  		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
  			cfqd->last_delayed_sync = now;
365722bb9   Vivek Goyal   cfq-iosched: dela...
3273
  	}
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
3274
3275
3276
3277
3278
3279
  
  	/*
  	 * If this is the active queue, check if it needs to be expired,
  	 * or if we want to idle in case it has no pending requests.
  	 */
  	if (cfqd->active_queue == cfqq) {
a36e71f99   Jens Axboe   cfq-iosched: add ...
3280
  		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
44f7c1606   Jens Axboe   cfq-iosched: defe...
3281
3282
3283
3284
  		if (cfq_cfqq_slice_new(cfqq)) {
  			cfq_set_prio_slice(cfqd, cfqq);
  			cfq_clear_cfqq_slice_new(cfqq);
  		}
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3285
3286
  
  		/*
7667aa063   Vivek Goyal   cfq-iosched: Take...
3287
3288
  		 * Should we wait for next request to come in before we expire
  		 * the queue.
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3289
  		 */
7667aa063   Vivek Goyal   cfq-iosched: Take...
3290
  		if (cfq_should_wait_busy(cfqd, cfqq)) {
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3291
3292
3293
3294
  			unsigned long extend_sl = cfqd->cfq_slice_idle;
  			if (!cfqd->cfq_slice_idle)
  				extend_sl = cfqd->cfq_group_idle;
  			cfqq->slice_end = jiffies + extend_sl;
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3295
  			cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
3296
  			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3297
  		}
a36e71f99   Jens Axboe   cfq-iosched: add ...
3298
  		/*
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
3299
3300
3301
3302
3303
3304
  		 * Idling is not enabled on:
  		 * - expired queues
  		 * - idle-priority queues
  		 * - async queues
  		 * - queues with still some requests queued
  		 * - when there is a close cooperator
a36e71f99   Jens Axboe   cfq-iosched: add ...
3305
  		 */
0871714e0   Jens Axboe   cfq-iosched: rela...
3306
  		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
3307
  			cfq_slice_expired(cfqd, 1);
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
3308
3309
  		else if (sync && cfqq_empty &&
  			 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f84   Corrado Zoccolo   cfq: improve fsyn...
3310
  			cfq_arm_slice_timer(cfqd);
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
3311
  		}
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
3312
  	}
6d048f531   Jens Axboe   cfq-iosched: deve...
3313

53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3314
  	if (!cfqd->rq_in_driver)
23e018a1b   Jens Axboe   block: get rid of...
3315
  		cfq_schedule_dispatch(cfqd);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3316
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
3317
3318
3319
3320
3321
  /*
   * we temporarily boost lower priority queues if they are holding fs exclusive
   * resources. they are boosted to normal prio (CLASS_BE/4)
   */
  static void cfq_prio_boost(struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3322
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
  	if (has_fs_excl()) {
  		/*
  		 * boost idle prio on transactions that would lock out other
  		 * users of the filesystem
  		 */
  		if (cfq_class_idle(cfqq))
  			cfqq->ioprio_class = IOPRIO_CLASS_BE;
  		if (cfqq->ioprio > IOPRIO_NORM)
  			cfqq->ioprio = IOPRIO_NORM;
  	} else {
  		/*
dddb74519   Corrado Zoccolo   cfq-iosched: simp...
3334
  		 * unboost the queue (if needed)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3335
  		 */
dddb74519   Corrado Zoccolo   cfq-iosched: simp...
3336
3337
  		cfqq->ioprio_class = cfqq->org_ioprio_class;
  		cfqq->ioprio = cfqq->org_ioprio;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3338
  	}
22e2c507c   Jens Axboe   [PATCH] Update cf...
3339
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3340

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
3341
  static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3342
  {
1b379d8da   Jens Axboe   cfq-iosched: get ...
3343
  	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
3344
  		cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3345
  		return ELV_MQUEUE_MUST;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
3346
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3347

22e2c507c   Jens Axboe   [PATCH] Update cf...
3348
  	return ELV_MQUEUE_MAY;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3349
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3350
  static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3351
3352
3353
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	struct task_struct *tsk = current;
91fac317a   Vasily Tarasov   cfq-iosched: get ...
3354
  	struct cfq_io_context *cic;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3355
3356
3357
3358
3359
3360
3361
3362
  	struct cfq_queue *cfqq;
  
  	/*
  	 * don't force setup of a queue from here, as a call to may_queue
  	 * does not necessarily imply that a request actually will be queued.
  	 * so just lookup a possibly existing queue, or return 'may queue'
  	 * if that fails
  	 */
4ac845a2e   Jens Axboe   block: cfq: make ...
3363
  	cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
3364
3365
  	if (!cic)
  		return ELV_MQUEUE_MAY;
b0b78f81a   Jens Axboe   cfq-iosched: use ...
3366
  	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507c   Jens Axboe   [PATCH] Update cf...
3367
  	if (cfqq) {
fd0928df9   Jens Axboe   ioprio: move io p...
3368
  		cfq_init_prio_data(cfqq, cic->ioc);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3369
  		cfq_prio_boost(cfqq);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
3370
  		return __cfq_may_queue(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3371
3372
3373
  	}
  
  	return ELV_MQUEUE_MAY;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3374
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3375
3376
3377
  /*
   * queue lock held here
   */
bb37b94c6   Jens Axboe   [BLOCK] Cleanup u...
3378
  static void cfq_put_request(struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3379
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3380
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3381

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3382
  	if (cfqq) {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3383
  		const int rw = rq_data_dir(rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3384

22e2c507c   Jens Axboe   [PATCH] Update cf...
3385
3386
  		BUG_ON(!cfqq->allocated[rw]);
  		cfqq->allocated[rw]--;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3387

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3388
  		put_io_context(RQ_CIC(rq)->ioc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3389

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3390
  		rq->elevator_private = NULL;
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3391
  		rq->elevator_private2 = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3392

7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
3393
3394
3395
  		/* Put down rq reference on cfqg */
  		cfq_put_cfqg(RQ_CFQG(rq));
  		rq->elevator_private3 = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3396
3397
3398
  		cfq_put_queue(cfqq);
  	}
  }
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
3399
3400
3401
3402
3403
3404
  static struct cfq_queue *
  cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
  		struct cfq_queue *cfqq)
  {
  	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
  	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d0408   Jeff Moyer   cfq: change the m...
3405
  	cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
3406
3407
3408
  	cfq_put_queue(cfqq);
  	return cic_to_cfqq(cic, 1);
  }
e6c5bc737   Jeff Moyer   cfq: break apart ...
3409
3410
3411
3412
3413
3414
3415
3416
  /*
   * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
   * was the last process referring to said cfqq.
   */
  static struct cfq_queue *
  split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
  {
  	if (cfqq_process_refs(cfqq) == 1) {
e6c5bc737   Jeff Moyer   cfq: break apart ...
3417
3418
  		cfqq->pid = current->pid;
  		cfq_clear_cfqq_coop(cfqq);
ae54abed6   Shaohua Li   cfq-iosched: spli...
3419
  		cfq_clear_cfqq_split_coop(cfqq);
e6c5bc737   Jeff Moyer   cfq: break apart ...
3420
3421
3422
3423
  		return cfqq;
  	}
  
  	cic_set_cfqq(cic, NULL, 1);
d02a2c077   Shaohua Li   cfq-iosched: fix ...
3424
3425
  
  	cfq_put_cooperator(cfqq);
e6c5bc737   Jeff Moyer   cfq: break apart ...
3426
3427
3428
  	cfq_put_queue(cfqq);
  	return NULL;
  }
/*
 * Allocate cfq data structures associated with this request.
 *
 * Called by the elevator core when a request is being set up.  Looks up
 * (or creates) the cfq_queue this task's I/O should go to, accounts the
 * request against it, and stashes the cic/cfqq/cfqg pointers in the
 * request's elevator_private fields for the completion/put paths.
 *
 * Returns 0 on success, 1 on failure (no cfq_io_context could be
 * obtained), in which case a dispatch is kicked so the queue does not
 * stall.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	/* cfq_get_io_context() may allocate and therefore sleep */
	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		/* no usable queue yet (or only the OOM fallback) - get one */
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			/* split dropped the cic<->cfqq link; allocate anew */
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue.  The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;
	/* reference held by the request; dropped in cfq_put_request() */
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	/* pin the group as well so it outlives the request */
	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	/* make sure pending work still gets dispatched despite the failure */
	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	cfq_log(cfqd, "set_request fail");
	return 1;
}
65f27f384   David Howells   WorkStruct: Pass ...
3493
  static void cfq_kick_queue(struct work_struct *work)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3494
  {
65f27f384   David Howells   WorkStruct: Pass ...
3495
  	struct cfq_data *cfqd =
23e018a1b   Jens Axboe   block: get rid of...
3496
  		container_of(work, struct cfq_data, unplug_work);
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3497
  	struct request_queue *q = cfqd->queue;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3498

40bb54d19   Jens Axboe   cfq-iosched: no n...
3499
  	spin_lock_irq(q->queue_lock);
a7f557923   Tejun Heo   block: kill blk_s...
3500
  	__blk_run_queue(cfqd->queue);
40bb54d19   Jens Axboe   cfq-iosched: no n...
3501
  	spin_unlock_irq(q->queue_lock);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
  }
  
/*
 * Timer running if the active_queue is currently idling inside its time slice
 *
 * Fires when the idle window of the active queue elapses.  Depending on
 * the queue's state it either lets a pending request dispatch, expires
 * the slice, or just resets the "deep" hint.  All decisions are made
 * under the queue lock.
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	/* assume a genuine timeout unless we find an active queue below */
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");
	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	/* defer the actual dispatch to the unplug work item */
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
/*
 * Synchronously stop the idle-slice timer and the unplug work item.
 *
 * Order matters: the timer handler (cfq_idle_slice_timer) can queue
 * unplug_work via cfq_schedule_dispatch(), so the timer must be killed
 * first or the work could be re-queued after being cancelled.
 */
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}
22e2c507c   Jens Axboe   [PATCH] Update cf...
3561

c2dea2d1f   Vasily Tarasov   cfq: async queue ...
3562
3563
3564
3565
3566
3567
3568
3569
3570
  static void cfq_put_async_queues(struct cfq_data *cfqd)
  {
  	int i;
  
  	for (i = 0; i < IOPRIO_BE_NR; i++) {
  		if (cfqd->async_cfqq[0][i])
  			cfq_put_queue(cfqd->async_cfqq[0][i]);
  		if (cfqd->async_cfqq[1][i])
  			cfq_put_queue(cfqd->async_cfqq[1][i]);
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
3571
  	}
2389d1ef1   Oleg Nesterov   cfq: fix IOPRIO_C...
3572
3573
3574
  
  	if (cfqd->async_idle_cfqq)
  		cfq_put_queue(cfqd->async_idle_cfqq);
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
3575
  }
bb729bc98   Jens Axboe   cfq-iosched: use ...
3576
3577
3578
3579
  static void cfq_cfqd_free(struct rcu_head *head)
  {
  	kfree(container_of(head, struct cfq_data, rcu));
  }
/*
 * Elevator exit hook: tear down all cfq state for this queue.
 *
 * Expires any active queue, detaches every io_context, drops the async
 * queue and group references, and finally frees cfqd via RCU so that
 * lockless readers of cfqg->blkg->key can finish their grace periods.
 */
static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	/* unlink every io_context still attached to this queue */
	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);
	cfq_release_cfq_groups(cfqd);
	cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);

	spin_unlock_irq(q->queue_lock);

	/*
	 * NOTE(review): shut down again - presumably the exit work above
	 * can re-arm the timer/work while the lock was held; confirm.
	 */
	cfq_shutdown_timer_wq(cfqd);

	/* return our cic index to the global ida */
	spin_lock(&cic_index_lock);
	ida_remove(&cic_index_ida, cfqd->cic_index);
	spin_unlock(&cic_index_lock);

	/* Wait for cfqg->blkg->key accessors to exit their grace periods. */
	call_rcu(&cfqd->rcu, cfq_cfqd_free);
}
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
  static int cfq_alloc_cic_index(void)
  {
  	int index, error;
  
  	do {
  		if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
  			return -ENOMEM;
  
  		spin_lock(&cic_index_lock);
  		error = ida_get_new(&cic_index_ida, &index);
  		spin_unlock(&cic_index_lock);
  		if (error && error != -EAGAIN)
  			return error;
  	} while (error);
  
  	return index;
  }
/*
 * Elevator init hook: allocate and initialize the per-queue cfq_data.
 *
 * Sets up the root service tree and root group, the permanent OOM
 * fallback queue, the idle timer / unplug work, and all tunables from
 * their module-level defaults.  Returns the new cfqd, or NULL on
 * allocation failure.
 */
static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	int i, j;
	struct cfq_group *cfqg;
	struct cfq_rb_root *st;

	i = cfq_alloc_cic_index();
	if (i < 0)
		return NULL;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->cic_index = i;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group */
	cfqg = &cfqd->root_group;
	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	/* Give preference to root group over other groups */
	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/*
	 * Take a reference to root group which we never drop. This is just
	 * to make sure that cfq_put_cfqg() does not try to kfree root group
	 */
	atomic_set(&cfqg->ref, 1);
	rcu_read_lock();
	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
					(void *)cfqd, 0);
	rcu_read_unlock();
#endif
	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	atomic_inc(&cfqd->oom_cfqq.ref);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);

	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	/* copy the module-level tunable defaults into this instance */
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->cfq_group_isolation = 0;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in last
	 * second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return cfqd;
}
  
  static void cfq_slab_kill(void)
  {
d6de8be71   Jens Axboe   cfq-iosched: fix ...
3714
3715
3716
3717
  	/*
  	 * Caller already ensured that pending RCU callbacks are completed,
  	 * so we should have no busy allocations at this point.
  	 */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3718
3719
3720
3721
3722
3723
3724
3725
  	if (cfq_pool)
  		kmem_cache_destroy(cfq_pool);
  	if (cfq_ioc_pool)
  		kmem_cache_destroy(cfq_ioc_pool);
  }
  
  static int __init cfq_slab_setup(void)
  {
0a31bd5f2   Christoph Lameter   KMEM_CACHE(): sim...
3726
  	cfq_pool = KMEM_CACHE(cfq_queue, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3727
3728
  	if (!cfq_pool)
  		goto fail;
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
3729
  	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3730
3731
3732
3733
3734
3735
3736
3737
  	if (!cfq_ioc_pool)
  		goto fail;
  
  	return 0;
  fail:
  	cfq_slab_kill();
  	return -ENOMEM;
  }
/*
 * sysfs parts below -->
 */

/*
 * Format @var as a decimal value followed by a newline into @page.
 * Returns the number of bytes written (sysfs show-helper convention).
 *
 * Note: the format string is "%d\n" - in the garbled copy the newline
 * escape had been split across two physical lines, which is restored
 * here.
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}
  
/*
 * Parse the decimal value in @page into *@var (sysfs store-helper).
 * Always consumes the whole write and returns @count; trailing
 * non-digit input is silently ignored by simple_strtoul.
 */
static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;	/* cast away const for simple_strtoul's endp */

	*var = simple_strtoul(p, &p, 10);
	return count;
}
/*
 * Generate a sysfs "show" method per tunable: reads __VAR from the
 * elevator's cfq_data and, when __CONV is set, converts it from
 * jiffies to milliseconds before formatting it with cfq_var_show().
 */
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
#undef SHOW_FUNCTION
  
/*
 * Generate a sysfs "store" method per tunable: parses the written
 * value, clamps it to [MIN, MAX], and stores it through __PTR - after
 * an ms-to-jiffies conversion when __CONV is set.
 */
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
#undef STORE_FUNCTION
/*
 * One sysfs attribute per tunable; pairs the generated
 * cfq_<name>_show / cfq_<name>_store methods, world-readable and
 * owner-writable.
 */
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(group_isolation),
	__ATTR_NULL	/* sentinel */
};
/* The CFQ elevator: wires the elevator-core callbacks to this file. */
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = 		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};
/*
 * blkio-cgroup policy hooks (proportional-weight policy).  Without
 * group scheduling an empty policy is registered so the register /
 * unregister calls in cfq_init()/cfq_exit() stay unconditional.
 */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
	.ops = {
		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
	},
	.plid = BLKIO_POLICY_PROP,
};
#else
static struct blkio_policy_type blkio_policy_cfq;
#endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3867
3868
  static int __init cfq_init(void)
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3869
3870
3871
3872
3873
3874
3875
  	/*
  	 * could be 0 on HZ < 1000 setups
  	 */
  	if (!cfq_slice_async)
  		cfq_slice_async = 1;
  	if (!cfq_slice_idle)
  		cfq_slice_idle = 1;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3876
3877
3878
3879
3880
3881
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	if (!cfq_group_idle)
  		cfq_group_idle = 1;
  #else
  		cfq_group_idle = 0;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3882
3883
  	if (cfq_slab_setup())
  		return -ENOMEM;
2fdd82bd8   Adrian Bunk   block: let elv_re...
3884
  	elv_register(&iosched_cfq);
3e2520668   Vivek Goyal   blkio: Implement ...
3885
  	blkio_policy_register(&blkio_policy_cfq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3886

2fdd82bd8   Adrian Bunk   block: let elv_re...
3887
  	return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3888
3889
3890
3891
  }
  
/*
 * Module exit: unregister, then wait for all outstanding
 * cfq_io_contexts to be released before destroying the slab caches.
 * Late put_io_context() callers see ioc_gone != NULL and complete it.
 */
static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	blkio_policy_unregister(&blkio_policy_cfq);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(cfq_ioc_count))
		wait_for_completion(&all_gone);
	ida_destroy(&cic_index_ida);
	cfq_slab_kill();
}
  
/* Module entry/exit points and metadata. */
module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");