Blame view

block/cfq-iosched.c 107 KB
  /*
   *  CFQ, or complete fairness queueing, disk scheduler.
   *
   *  Based on ideas from a previously unfinished io
   *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   *
   *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   */
  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/blkdev.h>
  #include <linux/elevator.h>
  #include <linux/jiffies.h>
  #include <linux/rbtree.h>
  #include <linux/ioprio.h>
  #include <linux/blktrace_api.h>
  #include "cfq.h"
  
  /*
   * tunables
   */
  /* max queue in one round of service */
  static const int cfq_quantum = 8;
  static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  /* maximum backwards seek, in KiB */
  static const int cfq_back_max = 16 * 1024;
  /* penalty of a backwards seek */
  static const int cfq_back_penalty = 2;
  static const int cfq_slice_sync = HZ / 10;
  static int cfq_slice_async = HZ / 25;
  static const int cfq_slice_async_rq = 2;
  static int cfq_slice_idle = HZ / 125;
  static int cfq_group_idle = HZ / 125;
  static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  static const int cfq_hist_divisor = 4;

  /*
   * offset from end of service tree
   */
  #define CFQ_IDLE_DELAY		(HZ / 5)
  
  /*
   * below this threshold, we consider thinktime immediate
   */
  #define CFQ_MIN_TT		(2)
  #define CFQ_SLICE_SCALE		(5)
  #define CFQ_HW_QUEUE_MIN	(5)
  #define CFQ_SERVICE_SHIFT       12

  #define CFQQ_SEEK_THR		(sector_t)(8 * 100)
  #define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
  #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
  #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)

  #define RQ_CIC(rq)		\
  	((struct cfq_io_context *) (rq)->elevator_private[0])
  #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private[1])
  #define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elevator_private[2])

  static struct kmem_cache *cfq_pool;
  static struct kmem_cache *cfq_ioc_pool;

  static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
  static struct completion *ioc_gone;
  static DEFINE_SPINLOCK(ioc_gone_lock);

  static DEFINE_SPINLOCK(cic_index_lock);
  static DEFINE_IDA(cic_index_ida);
  #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
  #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  #define sample_valid(samples)	((samples) > 80)
  #define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

  /*
   * Most of our rbtree usage is for sorting with min extraction, so
   * if we cache the leftmost node we don't have to walk down the tree
   * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
   * move this into the elevator for the rq sorting as well.
   */
  struct cfq_rb_root {
  	struct rb_root rb;
  	struct rb_node *left;
  	unsigned count;
  	unsigned total_weight;
  	u64 min_vdisktime;
  	struct cfq_ttime ttime;
  };
  #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
  			.ttime = {.last_end_request = jiffies,},}
  
  /*
   * Per process-grouping structure
   */
  struct cfq_queue {
  	/* reference count */
  	int ref;
  	/* various state flags, see below */
  	unsigned int flags;
  	/* parent cfq_data */
  	struct cfq_data *cfqd;
  	/* service_tree member */
  	struct rb_node rb_node;
  	/* service_tree key */
  	unsigned long rb_key;
  	/* prio tree member */
  	struct rb_node p_node;
  	/* prio tree root we belong to, if any */
  	struct rb_root *p_root;
  	/* sorted list of pending requests */
  	struct rb_root sort_list;
  	/* if fifo isn't expired, next request to serve */
  	struct request *next_rq;
  	/* requests queued in sort_list */
  	int queued[2];
  	/* currently allocated requests */
  	int allocated[2];
  	/* fifo list of requests in sort_list */
  	struct list_head fifo;
  	/* time when queue got scheduled in to dispatch first request. */
  	unsigned long dispatch_start;
  	unsigned int allocated_slice;
  	unsigned int slice_dispatch;
  	/* time when first request from queue completed and slice started. */
  	unsigned long slice_start;
  	unsigned long slice_end;
  	long slice_resid;

  	/* number of requests that are on the dispatch list or inside driver */
  	int dispatched;
  
  	/* io prio of this group */
  	unsigned short ioprio, org_ioprio;
  	unsigned short ioprio_class;

  	pid_t pid;
  	u32 seek_history;
  	sector_t last_request_pos;
  	struct cfq_rb_root *service_tree;
  	struct cfq_queue *new_cfqq;
  	struct cfq_group *cfqg;
  	/* Number of sectors dispatched from queue in single dispatch round */
  	unsigned long nr_sectors;
  };
  
  /*
   * First index in the service_trees.
   * IDLE is handled separately, so it has negative index
   */
  enum wl_prio_t {
  	BE_WORKLOAD = 0,
  	RT_WORKLOAD = 1,
  	IDLE_WORKLOAD = 2,
  	CFQ_PRIO_NR,
  };
  
  /*
   * Second index in the service_trees.
   */
  enum wl_type_t {
  	ASYNC_WORKLOAD = 0,
  	SYNC_NOIDLE_WORKLOAD = 1,
  	SYNC_WORKLOAD = 2
  };
  /* This is per cgroup per device grouping structure */
  struct cfq_group {
  	/* group service_tree member */
  	struct rb_node rb_node;
  
  	/* group service_tree key */
  	u64 vdisktime;
  	unsigned int weight;
  	unsigned int new_weight;
  	bool needs_update;

  	/* number of cfqq currently on this group */
  	int nr_cfqq;
  	/*
  	 * Per group busy queues average. Useful for workload slice calc. We
  	 * create the array for each prio class but at run time it is used
  	 * only for RT and BE class and slot for IDLE class remains unused.
  	 * This is primarily done to avoid confusion and a gcc warning.
  	 */
  	unsigned int busy_queues_avg[CFQ_PRIO_NR];
  	/*
  	 * rr lists of queues with requests. We maintain service trees for
  	 * RT and BE classes. These trees are subdivided in subclasses
  	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
  	 * class there is no subclassification and all the cfq queues go on
  	 * a single tree service_tree_idle.
  	 * Counts are embedded in the cfq_rb_root
  	 */
  	struct cfq_rb_root service_trees[2][3];
  	struct cfq_rb_root service_tree_idle;
  
  	unsigned long saved_workload_slice;
  	enum wl_type_t saved_workload;
  	enum wl_prio_t saved_serving_prio;
  	struct blkio_group blkg;
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	struct hlist_node cfqd_node;
  	int ref;
  #endif
  	/* number of requests that are on the dispatch list or inside driver */
  	int dispatched;
  	struct cfq_ttime ttime;
  };
  
  /*
   * Per block device queue structure
   */
  struct cfq_data {
  	struct request_queue *queue;
  	/* Root service tree for cfq_groups */
  	struct cfq_rb_root grp_service_tree;
  	struct cfq_group root_group;
  
  	/*
  	 * The priority currently being served
  	 */
  	enum wl_prio_t serving_prio;
  	enum wl_type_t serving_type;
  	unsigned long workload_expires;
  	struct cfq_group *serving_group;
  
  	/*
  	 * Each priority tree is sorted by next_request position.  These
  	 * trees are used when determining if two or more queues are
  	 * interleaving requests (see cfq_close_cooperator).
  	 */
  	struct rb_root prio_trees[CFQ_PRIO_LISTS];
  	unsigned int busy_queues;
  	unsigned int busy_sync_queues;

  	int rq_in_driver;
  	int rq_in_flight[2];
  
  	/*
  	 * queue-depth detection
  	 */
  	int rq_queued;
  	int hw_tag;
  	/*
  	 * hw_tag can be
  	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
  	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
  	 *  0 => no NCQ
  	 */
  	int hw_tag_est_depth;
  	unsigned int hw_tag_samples;

  	/*
  	 * idle window management
  	 */
  	struct timer_list idle_slice_timer;
  	struct work_struct unplug_work;

  	struct cfq_queue *active_queue;
  	struct cfq_io_context *active_cic;

  	/*
  	 * async queue for each priority case
  	 */
  	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
  	struct cfq_queue *async_idle_cfqq;

  	sector_t last_position;

  	/*
  	 * tunables, see top of file
  	 */
  	unsigned int cfq_quantum;
  	unsigned int cfq_fifo_expire[2];
  	unsigned int cfq_back_penalty;
  	unsigned int cfq_back_max;
  	unsigned int cfq_slice[2];
  	unsigned int cfq_slice_async_rq;
  	unsigned int cfq_slice_idle;
  	unsigned int cfq_group_idle;
  	unsigned int cfq_latency;

  	unsigned int cic_index;
  	struct list_head cic_list;

  	/*
  	 * Fallback dummy cfqq for extreme OOM conditions
  	 */
  	struct cfq_queue oom_cfqq;

  	unsigned long last_delayed_sync;

  	/* List of cfq groups being managed on this device*/
  	struct hlist_head cfqg_list;

  	/* Number of groups which are on blkcg->blkg_list */
  	unsigned int nr_blkcg_linked_grps;
  };
  static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
  static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
  					    enum wl_prio_t prio,
  					    enum wl_type_t type)
  {
  	if (!cfqg)
  		return NULL;
  	if (prio == IDLE_WORKLOAD)
  		return &cfqg->service_tree_idle;

  	return &cfqg->service_trees[prio][type];
  }
  enum cfqq_state_flags {
  	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
  	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
  	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
  	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
  	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
  	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
  	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
  	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
  	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
  	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
  	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
  	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
  	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
  };
  
  #define CFQ_CFQQ_FNS(name)						\
  static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
  {									\
  	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
  }									\
  static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
  {									\
  	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
  }									\
  static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
  {									\
  	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
  }
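  /*
   * CFQ_CFQQ_FNS() expands to a mark/clear/test helper triplet
   * (cfq_mark_cfqq_*, cfq_clear_cfqq_*, cfq_cfqq_*) for each flag below.
   */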
  
  CFQ_CFQQ_FNS(on_rr);
  CFQ_CFQQ_FNS(wait_request);
  CFQ_CFQQ_FNS(must_dispatch);
  CFQ_CFQQ_FNS(must_alloc_slice);
  CFQ_CFQQ_FNS(fifo_expire);
  CFQ_CFQQ_FNS(idle_window);
  CFQ_CFQQ_FNS(prio_changed);
  CFQ_CFQQ_FNS(slice_new);
  CFQ_CFQQ_FNS(sync);
  CFQ_CFQQ_FNS(coop);
  CFQ_CFQQ_FNS(split_coop);
  CFQ_CFQQ_FNS(deep);
  CFQ_CFQQ_FNS(wait_busy);
  #undef CFQ_CFQQ_FNS
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
  			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
  			blkg_path(&(cfqq)->cfqg->blkg), ##args)
  
  #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
  	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
  				blkg_path(&(cfqg)->blkg), ##args)       \

  #else
  #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
  #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
  #endif
  #define cfq_log(cfqd, fmt, args...)	\
  	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
  /* Traverses through cfq group service trees */
  #define for_each_cfqg_st(cfqg, i, j, st) \
  	for (i = 0; i <= IDLE_WORKLOAD; i++) \
  		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
  			: &cfqg->service_tree_idle; \
  			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
  			(i == IDLE_WORKLOAD && j == 0); \
  			j++, st = i < IDLE_WORKLOAD ? \
  			&cfqg->service_trees[i][j]: NULL) \
  static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
  	struct cfq_ttime *ttime, bool group_idle)
  {
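  	/*
  	 * True when the sampled mean think time exceeds the idle slice we
  	 * would spend waiting (group or queue idle), i.e. idling is not
  	 * worth it for this ttime.
  	 */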
  	unsigned long slice;
  	if (!sample_valid(ttime->ttime_samples))
  		return false;
  	if (group_idle)
  		slice = cfqd->cfq_group_idle;
  	else
  		slice = cfqd->cfq_slice_idle;
  	return ttime->ttime_mean > slice;
  }

  static inline bool iops_mode(struct cfq_data *cfqd)
  {
  	/*
  	 * If we are not idling on queues and it is a NCQ drive, parallel
  	 * execution of requests is on and measuring time is not possible
  	 * in most of the cases until and unless we drive shallower queue
  	 * depths and that becomes a performance bottleneck. In such cases
  	 * switch to start providing fairness in terms of number of IOs.
  	 */
  	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
  		return true;
  	else
  		return false;
  }
  static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
  {
  	if (cfq_class_idle(cfqq))
  		return IDLE_WORKLOAD;
  	if (cfq_class_rt(cfqq))
  		return RT_WORKLOAD;
  	return BE_WORKLOAD;
  }
  
  static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
  {
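  	/*
  	 * Second-level service tree index: async for non-sync queues,
  	 * sync-noidle when idling is disabled for the queue, else sync.
  	 */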
  	if (!cfq_cfqq_sync(cfqq))
  		return ASYNC_WORKLOAD;
  	if (!cfq_cfqq_idle_window(cfqq))
  		return SYNC_NOIDLE_WORKLOAD;
  	return SYNC_WORKLOAD;
  }
  static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
  					struct cfq_data *cfqd,
  					struct cfq_group *cfqg)
  {
  	if (wl == IDLE_WORKLOAD)
  		return cfqg->service_tree_idle.count;

  	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
  		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
  		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
  }
  static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
  					struct cfq_group *cfqg)
  {
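  	/* busy async queues in this group: RT async plus BE async trees */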
  	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
  		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
  }
  static void cfq_dispatch_insert(struct request_queue *, struct request *);
  static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
  				       struct io_context *, gfp_t);
  static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
  						struct io_context *);
  
  static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
  					    bool is_sync)
  {
  	return cic->cfqq[is_sync];
  }
  
  static inline void cic_set_cfqq(struct cfq_io_context *cic,
  				struct cfq_queue *cfqq, bool is_sync)
  {
  	cic->cfqq[is_sync] = cfqq;
  }
  #define CIC_DEAD_KEY	1ul
  #define CIC_DEAD_INDEX_SHIFT	1
  
  static inline void *cfqd_dead_key(struct cfq_data *cfqd)
  {
  	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
  }
  
  static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  {
  	struct cfq_data *cfqd = cic->key;
  
  	if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
  		return NULL;
  
  	return cfqd;
  }
  /*
   * We regard a request as SYNC, if it's either a read or has the SYNC bit
   * set (in which case it could also be direct WRITE).
   */
  static inline bool cfq_bio_sync(struct bio *bio)
  {
  	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
  }

  /*
   * scheduler run of queue, if there are requests pending and no one in the
   * driver that will restart queueing
   */
  static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
  {
  	if (cfqd->busy_queues) {
  		cfq_log(cfqd, "schedule dispatch");
  		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
  	}
  }
  /*
   * Scale schedule slice based on io priority. Use the sync time slice only
   * if a queue is marked sync and has sync io queued. A sync queue with async
   * io only, should not get full sync slice length.
   */
  static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
  				 unsigned short prio)
  {
  	const int base_slice = cfqd->cfq_slice[sync];

  	WARN_ON(prio >= IOPRIO_BE_NR);
  
  	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
  }

  static inline int
  cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
  }
  static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
  {
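  	/*
  	 * Convert a service slice into vdisktime: scale by the default
  	 * weight over the group weight, so heavier groups accrue
  	 * vdisktime more slowly and thus get a larger share.
  	 */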
  	u64 d = delta << CFQ_SERVICE_SHIFT;
  
  	d = d * BLKIO_WEIGHT_DEFAULT;
  	do_div(d, cfqg->weight);
  	return d;
  }
  
  static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
  {
  	s64 delta = (s64)(vdisktime - min_vdisktime);
  	if (delta > 0)
  		min_vdisktime = vdisktime;
  
  	return min_vdisktime;
  }
  
  static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
  {
  	s64 delta = (s64)(vdisktime - min_vdisktime);
  	if (delta < 0)
  		min_vdisktime = vdisktime;
  
  	return min_vdisktime;
  }
  
  static void update_min_vdisktime(struct cfq_rb_root *st)
  {
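  	/*
  	 * Track the smallest vdisktime on the tree (the leftmost group),
  	 * advancing monotonically via max_vdisktime().
  	 */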
  	struct cfq_group *cfqg;

  	if (st->left) {
  		cfqg = rb_entry_cfqg(st->left);
  		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
  						  cfqg->vdisktime);
  	}
  }
  /*
   * get averaged number of queues of RT/BE priority.
   * average is updated, with a formula that gives more weight to higher numbers,
   * to quickly follows sudden increases and decrease slowly
   */
  static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
  					struct cfq_group *cfqg, bool rt)
  {
  	unsigned min_q, max_q;
  	unsigned mult  = cfq_hist_divisor - 1;
  	unsigned round = cfq_hist_divisor / 2;
  	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

  	min_q = min(cfqg->busy_queues_avg[rt], busy);
  	max_q = max(cfqg->busy_queues_avg[rt], busy);
  	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
  		cfq_hist_divisor;
  	return cfqg->busy_queues_avg[rt];
  }
  
  static inline unsigned
  cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  
  	return cfq_target_latency * cfqg->weight / st->total_weight;
  }
  static inline unsigned
  cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
  	if (cfqd->cfq_latency) {
  		/*
  		 * interested queues (we consider only the ones with the same
  		 * priority class in the cfq group)
  		 */
  		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
  						cfq_class_rt(cfqq));
  		unsigned sync_slice = cfqd->cfq_slice[1];
  		unsigned expect_latency = sync_slice * iq;
  		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
  
  		if (expect_latency > group_slice) {
  			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
  			/* scale low_slice according to IO priority
  			 * and sync vs async */
  			unsigned low_slice =
  				min(slice, base_low_slice * slice / sync_slice);
  			/* the adapted slice value is scaled to fit all iqs
  			 * into the target latency */
  			slice = max(slice * group_slice / expect_latency,
  				    low_slice);
  		}
  	}
  	return slice;
  }
  
  static inline void
  cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

  	cfqq->slice_start = jiffies;
  	cfqq->slice_end = jiffies + slice;
  	cfqq->allocated_slice = slice;
  	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
  }
  
  /*
   * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
   * isn't valid until the first request from the dispatch is activated
   * and the slice time set.
   */
  static inline bool cfq_slice_used(struct cfq_queue *cfqq)
  {
  	if (cfq_cfqq_slice_new(cfqq))
  		return false;
  	if (time_before(jiffies, cfqq->slice_end))
  		return false;

  	return true;
  }
  
  /*
   * Lifted from AS - choose which of rq1 and rq2 that is best served now.
   * We choose the request that is closest to the head right now. Distance
   * behind the head is penalized and only allowed to a certain extent.
   */
  static struct request *
  cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
  {
  	sector_t s1, s2, d1 = 0, d2 = 0;
  	unsigned long back_max;
  #define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
  #define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
  	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

  	if (rq1 == NULL || rq1 == rq2)
  		return rq2;
  	if (rq2 == NULL)
  		return rq1;

  	if (rq_is_sync(rq1) != rq_is_sync(rq2))
  		return rq_is_sync(rq1) ? rq1 : rq2;
  	s1 = blk_rq_pos(rq1);
  	s2 = blk_rq_pos(rq2);

  	/*
  	 * by definition, 1KiB is 2 sectors
  	 */
  	back_max = cfqd->cfq_back_max * 2;
  
  	/*
  	 * Strict one way elevator _except_ in the case where we allow
  	 * short backward seeks which are biased as twice the cost of a
  	 * similar forward seek.
  	 */
  	if (s1 >= last)
  		d1 = s1 - last;
  	else if (s1 + back_max >= last)
  		d1 = (last - s1) * cfqd->cfq_back_penalty;
  	else
  		wrap |= CFQ_RQ1_WRAP;
  
  	if (s2 >= last)
  		d2 = s2 - last;
  	else if (s2 + back_max >= last)
  		d2 = (last - s2) * cfqd->cfq_back_penalty;
  	else
  		wrap |= CFQ_RQ2_WRAP;

  	/* Found required data */
  
  	/*
  	 * By doing switch() on the bit mask "wrap" we avoid having to
  	 * check two variables for all permutations: --> faster!
  	 */
  	switch (wrap) {
  	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  		if (d1 < d2)
  			return rq1;
  		else if (d2 < d1)
  			return rq2;
  		else {
  			if (s1 >= s2)
  				return rq1;
  			else
  				return rq2;
  		}

  	case CFQ_RQ2_WRAP:
  		return rq1;
  	case CFQ_RQ1_WRAP:
  		return rq2;
  	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
  	default:
  		/*
  		 * Since both rqs are wrapped,
  		 * start with the one that's further behind head
  		 * (--> only *one* back seek required),
  		 * since back seek takes more time than forward.
  		 */
  		if (s1 <= s2)
  			return rq1;
  		else
  			return rq2;
  	}
  }
  /*
   * The below is leftmost cache rbtree addon
   */
  static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
  {
  	/* Service tree is empty */
  	if (!root->count)
  		return NULL;
  	if (!root->left)
  		root->left = rb_first(&root->rb);
  	if (root->left)
  		return rb_entry(root->left, struct cfq_queue, rb_node);
  
  	return NULL;
  }
  static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
  {
  	if (!root->left)
  		root->left = rb_first(&root->rb);
  
  	if (root->left)
  		return rb_entry_cfqg(root->left);
  
  	return NULL;
  }
  static void rb_erase_init(struct rb_node *n, struct rb_root *root)
  {
  	rb_erase(n, root);
  	RB_CLEAR_NODE(n);
  }
  static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
  {
  	if (root->left == n)
  		root->left = NULL;
  	rb_erase_init(n, &root->rb);
  	--root->count;
  }
  /*
   * would be nice to take fifo expire time into account as well
   */
  static struct request *
  cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  		  struct request *last)
  {
  	struct rb_node *rbnext = rb_next(&last->rb_node);
  	struct rb_node *rbprev = rb_prev(&last->rb_node);
  	struct request *next = NULL, *prev = NULL;

  	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
  
  	if (rbprev)
  		prev = rb_entry_rq(rbprev);

  	if (rbnext)
  		next = rb_entry_rq(rbnext);
  	else {
  		rbnext = rb_first(&cfqq->sort_list);
  		if (rbnext && rbnext != &last->rb_node)
  			next = rb_entry_rq(rbnext);
  	}

  	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
  }
  static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  				      struct cfq_queue *cfqq)
  {
  	/*
  	 * just an approximation, should be ok.
  	 */
  	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
  		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
  }
  static inline s64
  cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
  	return cfqg->vdisktime - st->min_vdisktime;
  }
  
  static void
  __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
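  	/*
  	 * Plain rbtree insert keyed by vdisktime relative to the tree's
  	 * min_vdisktime; remember if we became the leftmost node so the
  	 * cached st->left pointer stays valid.
  	 */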
  	struct rb_node **node = &st->rb.rb_node;
  	struct rb_node *parent = NULL;
  	struct cfq_group *__cfqg;
  	s64 key = cfqg_key(st, cfqg);
  	int left = 1;
  
  	while (*node != NULL) {
  		parent = *node;
  		__cfqg = rb_entry_cfqg(parent);
  
  		if (key < cfqg_key(st, __cfqg))
  			node = &parent->rb_left;
  		else {
  			node = &parent->rb_right;
  			left = 0;
  		}
  	}
  
  	if (left)
  		st->left = &cfqg->rb_node;
  
  	rb_link_node(&cfqg->rb_node, parent, node);
  	rb_insert_color(&cfqg->rb_node, &st->rb);
  }
  
  static void
  cfq_update_group_weight(struct cfq_group *cfqg)
  {
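  	/*
  	 * Apply a weight change requested through the blkio cgroup lazily,
  	 * only while the group is off the service tree.
  	 */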
  	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
  	if (cfqg->needs_update) {
  		cfqg->weight = cfqg->new_weight;
  		cfqg->needs_update = false;
  	}
  }
  
  static void
  cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
  	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
  
  	cfq_update_group_weight(cfqg);
  	__cfq_group_service_tree_add(st, cfqg);
  	st->total_weight += cfqg->weight;
  }
  
  static void
  cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  	struct cfq_group *__cfqg;
  	struct rb_node *n;
  
  	cfqg->nr_cfqq++;
  	if (!RB_EMPTY_NODE(&cfqg->rb_node))
  		return;
  
  	/*
  	 * Currently put the group at the end. Later implement something
  	 * so that groups get lesser vtime based on their weights, so that
  	 * if group does not loose all if it was not continuously backlogged.
  	 */
  	n = rb_last(&st->rb);
  	if (n) {
  		__cfqg = rb_entry_cfqg(n);
  		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
  	} else
  		cfqg->vdisktime = st->min_vdisktime;
  	cfq_group_service_tree_add(st, cfqg);
  }

  static void
  cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
  {
  	st->total_weight -= cfqg->weight;
  	if (!RB_EMPTY_NODE(&cfqg->rb_node))
  		cfq_rb_erase(&cfqg->rb_node, st);
  }

  static void
  cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  
  	BUG_ON(cfqg->nr_cfqq < 1);
  	cfqg->nr_cfqq--;

  	/* If there are other cfq queues under this group, don't delete it */
  	if (cfqg->nr_cfqq)
  		return;
  	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
  	cfq_group_service_tree_del(st, cfqg);
  	cfqg->saved_workload_slice = 0;
  	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
  }
  static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
  						unsigned int *unaccounted_time)
  {
  	unsigned int slice_used;
  
  	/*
  	 * Queue got expired before even a single request completed or
  	 * got expired immediately after first request completion.
  	 */
  	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
  		/*
  		 * Also charge the seek time incurred to the group, otherwise
  		 * if there are multiple queues in the group, each can dispatch
  		 * a single request on seeky media and cause lots of seek time
  		 * and group will never know it.
  		 */
  		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
  					1);
  	} else {
  		slice_used = jiffies - cfqq->slice_start;
  		if (slice_used > cfqq->allocated_slice) {
  			*unaccounted_time = slice_used - cfqq->allocated_slice;
  			slice_used = cfqq->allocated_slice;
  		}
  		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
  			*unaccounted_time += cfqq->slice_start -
  					cfqq->dispatch_start;
  	}
  	return slice_used;
  }
  
  static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
  				struct cfq_queue *cfqq)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
  	unsigned int used_sl, charge, unaccounted_sl = 0;
  	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
  			- cfqg->service_tree_idle.count;
  
  	BUG_ON(nr_sync < 0);
  	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

  	if (iops_mode(cfqd))
  		charge = cfqq->slice_dispatch;
  	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
  		charge = cfqq->allocated_slice;
  
  	/* Can't update vdisktime while group is on service tree */
  	cfq_group_service_tree_del(st, cfqg);
  	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
  	/* If a new weight was requested, update now, off tree */
  	cfq_group_service_tree_add(st, cfqg);
  
  	/* This group is being expired. Save the context */
  	if (time_after(cfqd->workload_expires, jiffies)) {
  		cfqg->saved_workload_slice = cfqd->workload_expires
  						- jiffies;
  		cfqg->saved_workload = cfqd->serving_type;
  		cfqg->saved_serving_prio = cfqd->serving_prio;
  	} else
  		cfqg->saved_workload_slice = 0;
  
  	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
  					st->min_vdisktime);
  	cfq_log_cfqq(cfqq->cfqd, cfqq,
  		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
  		     used_sl, cfqq->slice_dispatch, charge,
  		     iops_mode(cfqd), cfqq->nr_sectors);
  	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
  					  unaccounted_sl);
  	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
  }
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
  {
  	if (blkg)
  		return container_of(blkg, struct cfq_group, blkg);
  	return NULL;
  }
  static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
  					  unsigned int weight)
  {
  	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
  	cfqg->new_weight = weight;
  	cfqg->needs_update = true;
  }
  static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
  			struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
  {
  	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
  	unsigned int major, minor;

  	/*
  	 * Add group onto cgroup list. It might happen that bdi->dev is
  	 * not initialized yet. Initialize this new group without major
  	 * and minor info and this info will be filled in once a new thread
  	 * comes for IO.
  	 */
  	if (bdi->dev) {
  		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
  		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
  					(void *)cfqd, MKDEV(major, minor));
  	} else
  		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
  					(void *)cfqd, 0);
  
  	cfqd->nr_blkcg_linked_grps++;
  	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
  
  	/* Add group on cfqd list */
  	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
  }
  
  /*
   * Should be called from sleepable context. No request queue lock as per
   * cpu stats are allocated dynamically and alloc_percpu needs to be called
   * from sleepable context.
   */
  static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
  {
  	struct cfq_group *cfqg = NULL;
  	int i, j, ret;
  	struct cfq_rb_root *st;
  
  	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
  	if (!cfqg)
  		return NULL;

  	for_each_cfqg_st(cfqg, i, j, st)
  		*st = CFQ_RB_ROOT;
  	RB_CLEAR_NODE(&cfqg->rb_node);
  	cfqg->ttime.last_end_request = jiffies;
  	/*
  	 * Take the initial reference that will be released on destroy
  	 * This can be thought of a joint reference by cgroup and
  	 * elevator which will be dropped by either elevator exit
  	 * or cgroup deletion path depending on who is exiting first.
  	 */
  	cfqg->ref = 1;
  
  	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
  	if (ret) {
  		kfree(cfqg);
  		return NULL;
  	}
  	return cfqg;
  }
  
  static struct cfq_group *
  cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
  {
  	struct cfq_group *cfqg = NULL;
  	void *key = cfqd;
  	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
  	unsigned int major, minor;

  	/*
  	 * This is the common case when there are no blkio cgroups.
  	 * Avoid lookup in this case
180be2a04   Vivek Goyal   cfq-iosched: fix ...
1053
  	 */
f469a7b4d   Vivek Goyal   blk-cgroup: Allow...
1054
1055
1056
1057
  	if (blkcg == &blkio_root_cgroup)
  		cfqg = &cfqd->root_group;
  	else
  		cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1058

f469a7b4d   Vivek Goyal   blk-cgroup: Allow...
1059
1060
1061
1062
  	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
  		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
  		cfqg->blkg.dev = MKDEV(major, minor);
  	}
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1063

25fb5169d   Vivek Goyal   blkio: Dynamic cf...
1064
1065
1066
1067
  	return cfqg;
  }
  
/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
	struct blkio_cgroup *blkcg;
	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
	struct request_queue *q = cfqd->queue;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	cfqg = cfq_find_cfqg(cfqd, blkcg);
	if (cfqg) {
		rcu_read_unlock();
		return cfqg;
	}

	/*
	 * Need to allocate a group. Allocation of group also needs allocation
	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
	 * we need to drop rcu lock and queue_lock before we call alloc.
	 *
	 * Not taking any queue reference here and assuming that queue is
	 * around by the time we return. CFQ queue allocation code does
	 * the same. It might be racy though.
	 */

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	cfqg = cfq_alloc_cfqg(cfqd);

	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);

	/*
	 * If some other thread already allocated the group while we were
	 * not holding queue lock, free up the group
	 */
	__cfqg = cfq_find_cfqg(cfqd, blkcg);

	if (__cfqg) {
		kfree(cfqg);
		rcu_read_unlock();
		return __cfqg;
	}

	if (!cfqg)
		cfqg = &cfqd->root_group;

	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
	rcu_read_unlock();
	return cfqg;
}
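
/*
 * Descriptive note on cfq_get_cfqg() above (summary only, no behaviour
 * change): because cfq_alloc_cfqg() may block, both the rcu read lock and
 * the queue_lock are dropped around the allocation and re-taken afterwards.
 * The group lookup is then repeated; if another thread installed a group
 * for this cgroup in the meantime, the freshly allocated (never published)
 * copy is simply kfree()'d and the existing group is used instead.
 */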

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	cfqg->ref++;
	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = &cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqq->cfqg->ref++;
}
  
static void cfq_put_cfqg(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	BUG_ON(cfqg->ref <= 0);
	cfqg->ref--;
	if (cfqg->ref)
		return;
	for_each_cfqg_st(cfqg, i, j, st)
		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
	free_percpu(cfqg->blkg.stats_cpu);
	kfree(cfqg);
}
  
static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));

	hlist_del_init(&cfqg->cfqd_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	cfq_put_cfqg(cfqg);
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd)
{
	struct hlist_node *pos, *n;
	struct cfq_group *cfqg;

	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * cfqg also.
		 */
		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
			cfq_destroy_cfqg(cfqd, cfqg);
	}
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid cfq_data pointer as long as we hold
 * the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if elevator was exiting, cgroup deletion
 * path got to it first.
 */
static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct cfq_data *cfqd = key;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
	return &cfqd->root_group;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	return cfqg;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	if (add_front || !new_cfqq)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}
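
/*
 * Descriptive note on the rb_key used above (summary of the code): for a
 * queue added at the back, rb_key = cfq_slice_offset() + jiffies minus any
 * slice_resid carried over from the last slice. A queue that overran its
 * slice has a negative resid, so subtracting it pushes the key (and thus
 * the next service time) further right in the tree; a queue that gave up
 * its slice early is pulled forward. add_front queues use the current
 * leftmost key (or jiffies, if the tree is empty) minus HZ, so they sort
 * ahead of everything already queued.
 */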

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector.  Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}
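
/*
 * Descriptive note: cfq_prio_tree_lookup() returns the cfqq whose next_rq
 * starts exactly at @sector, or NULL if there is none. In the NULL case
 * *ret_parent (and *rb_link, if supplied) describe the insertion point, so
 * callers can either link a new node there or walk to the nearest
 * neighbour in the tree.
 */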
  
static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}
  
/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	elv_rb_add(&cfqq->sort_list, rq);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}
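
/*
 * Descriptive note: the lookup above finds a candidate for a front merge.
 * The bio would end at sector bio->bi_sector + bio_sectors(bio), so
 * elv_rb_find() is asked for a queued request that starts exactly at that
 * sector; if one exists, the bio can be merged in front of it.
 */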

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
				struct bio *bio)
{
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
					bio_data_dir(bio), cfq_bio_sync(bio));
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(next), rq_is_sync(next));
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}
  
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance.  If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *service_tree =
		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
					cfqd->serving_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!service_tree)
		return NULL;
	if (RB_EMPTY_ROOT(&service_tree->rb))
		return NULL;
	return cfq_rb_first(service_tree);
}

static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}
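
/*
 * Worked example (informational): CFQQ_CLOSE_THR is 8 * 1024 sectors, so
 * with 512-byte sectors a request is considered "close" when it starts
 * within 4 MiB (in either direction) of cfqd->last_position.
 */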

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}
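
/*
 * Descriptive note on cfqq_close() above: the search tries, in order,
 * (1) a queue whose next request starts exactly at cfqd->last_position,
 * (2) the tree node where such a request would have been inserted, and
 * (3) that node's in-order neighbour on the appropriate side,
 * returning the first candidate whose next request passes cfq_rq_close().
 */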
  
/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 * 	      closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and not waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */

static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_prio_t prio = cfqq_prio(cfqq);
	struct cfq_rb_root *service_tree = cfqq->service_tree;

	BUG_ON(!service_tree);
	BUG_ON(!service_tree->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (prio == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
			service_tree->count);
	return false;
}

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime.ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
			     cic->ttime.ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
}
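
/*
 * Informational example: with the default tunables cfq_slice_idle and
 * cfq_group_idle are both HZ / 125 (about 8 ms), so the timer armed above
 * typically fires roughly 8 ms into the future unless a new request from
 * the active queue arrives first and cancels the wait.
 */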

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
					rq_data_dir(rq), rq_is_sync(rq));
}
  
/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}
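
/*
 * Descriptive note: the fifo_expire flag set above limits cfq_check_fifo()
 * to a single FIFO lookup per time slice (the flag is cleared again when a
 * new slice starts in __cfq_set_active_queue()); later calls in the same
 * slice return NULL immediately. The request at the FIFO head is only
 * returned once its fifo deadline (rq_fifo_time()) has passed.
 */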

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}
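
/*
 * Worked example (informational, using the defaults): with
 * cfq_slice_async_rq = 2 and IOPRIO_BE_NR = 8, the returned limit is
 * 2 * 2 * 8 = 32 for an ioprio-0 queue and 2 * 2 * 1 = 4 for an
 * ioprio-7 queue.
 */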

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}
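
/*
 * Example of the accounting above (informational): if a cfqq has ref == 5
 * with 2 allocated reads and 1 allocated write, then io_refs == 3 and
 * process_refs == 2, i.e. two of the references come from processes (io
 * contexts) rather than from allocated requests.
 */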
  
  static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
  {
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}
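
/*
 * Note on the merge direction above (descriptive): the queue with fewer
 * process references is redirected (via ->new_cfqq) into the one with
 * more, and the surviving queue inherits the redirected queue's process
 * references, so later lookups converge on the busier queue.
 */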

static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2068
  static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2069
  {
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2070
2071
  	unsigned slice;
  	unsigned count;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2072
  	struct cfq_rb_root *st;
58ff82f34   Vivek Goyal   blkio: Implement ...
2073
  	unsigned group_slice;
e4ea0c16a   Shaohua Li writes   block cfq: select...
2074
  	enum wl_prio_t original_prio = cfqd->serving_prio;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2075

718eee057   Corrado Zoccolo   cfq-iosched: fair...
2076
  	/* Choose next priority. RT > BE > IDLE */
58ff82f34   Vivek Goyal   blkio: Implement ...
2077
  	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2078
  		cfqd->serving_prio = RT_WORKLOAD;
58ff82f34   Vivek Goyal   blkio: Implement ...
2079
  	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2080
2081
2082
2083
2084
2085
  		cfqd->serving_prio = BE_WORKLOAD;
  	else {
  		cfqd->serving_prio = IDLE_WORKLOAD;
  		cfqd->workload_expires = jiffies + 1;
  		return;
  	}
e4ea0c16a   Shaohua Li writes   block cfq: select...
2086
2087
  	if (original_prio != cfqd->serving_prio)
  		goto new_workload;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2088
2089
2090
2091
2092
  	/*
  	 * For RT and BE, we also have to choose the type
  	 * (SYNC, SYNC_NOIDLE, ASYNC), and compute a workload
  	 * expiration time.
  	 */
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2093
  	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2094
  	count = st->count;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2095
2096
  
  	/*
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2097
  	 * check workload expiration, and that we still have other queues ready
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2098
  	 */
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2099
  	if (count && !time_after(jiffies, cfqd->workload_expires))
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2100
  		return;
e4ea0c16a   Shaohua Li writes   block cfq: select...
2101
  new_workload:
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2102
2103
  	/* otherwise select new workload type */
  	cfqd->serving_type =
65b32a573   Vivek Goyal   cfq-iosched: Remo...
2104
2105
  		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
  	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2106
  	count = st->count;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2107
2108
2109
2110
2111
2112
  
  	/*
  	 * the workload slice is computed as a fraction of target latency
  	 * proportional to the number of queues in that workload, over
  	 * all the queues in the same priority class
  	 */
58ff82f34   Vivek Goyal   blkio: Implement ...
2113
2114
2115
2116
2117
  	group_slice = cfq_group_slice(cfqd, cfqg);
  
  	slice = group_slice * count /
  		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
  		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2118

f26bd1f0a   Vivek Goyal   blkio: Determine ...
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
  	if (cfqd->serving_type == ASYNC_WORKLOAD) {
  		unsigned int tmp;
  
  		/*
  		 * Async queues are currently system wide. Just taking the
  		 * proportion of queues within the same group would lead to a
  		 * higher async ratio system wide, as the root group generally
  		 * has a higher weight. It would be more accurate to compute
  		 * a system wide async/sync ratio.
  		 */
  		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
  		tmp = tmp/cfqd->busy_queues;
  		slice = min_t(unsigned, slice, tmp);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2132
2133
2134
  		/* async workload slice is scaled down according to
  		 * the sync/async slice ratio. */
  		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
f26bd1f0a   Vivek Goyal   blkio: Determine ...
2135
  	} else
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2136
2137
2138
2139
  		/* sync workload slice is at least 2 * cfq_slice_idle */
  		slice = max(slice, 2 * cfqd->cfq_slice_idle);
  
  	slice = max_t(unsigned, slice, CFQ_MIN_TT);
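  	/*
  	 * Editor's note -- a worked example with the default tunables,
  	 * assuming a single group so that group_slice is roughly the full
  	 * target latency: with cfq_target_latency = 300ms and 2 queues of
  	 * the serving workload out of 6 busy queues in the class, slice is
  	 * about 300 * 2 / 6 = 100ms. An async workload is further scaled by
  	 * cfq_slice[0]/cfq_slice[1] (HZ/25 over HZ/10, i.e. * 2/5), while a
  	 * sync workload is floored at 2 * cfq_slice_idle.
  	 */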
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
2140
  	cfq_log(cfqd, "workload slice:%d", slice);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2141
2142
  	cfqd->workload_expires = jiffies + slice;
  }
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2143
2144
2145
  static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
  {
  	struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b077   Vivek Goyal   blkio: Introduce ...
2146
  	struct cfq_group *cfqg;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2147
2148
2149
  
  	if (RB_EMPTY_ROOT(&st->rb))
  		return NULL;
25bc6b077   Vivek Goyal   blkio: Introduce ...
2150
  	cfqg = cfq_rb_first_group(st);
25bc6b077   Vivek Goyal   blkio: Introduce ...
2151
2152
  	update_min_vdisktime(st);
  	return cfqg;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2153
  }
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2154
2155
  static void cfq_choose_cfqg(struct cfq_data *cfqd)
  {
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2156
2157
2158
  	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
  
  	cfqd->serving_group = cfqg;
dae739ebc   Vivek Goyal   blkio: Group time...
2159
2160
2161
2162
2163
2164
  
  	/* Restore the workload type data */
  	if (cfqg->saved_workload_slice) {
  		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
  		cfqd->serving_type = cfqg->saved_workload;
  		cfqd->serving_prio = cfqg->saved_serving_prio;
66ae29197   Gui Jianfeng   cfq: set workload...
2165
2166
  	} else
  		cfqd->workload_expires = jiffies - 1;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
2167
  	choose_service_tree(cfqd, cfqg);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2168
  }
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2169
  /*
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2170
2171
   * Select a queue for service. If we have a current active queue,
   * check whether to continue servicing it, or retrieve and set a new one.
22e2c507c   Jens Axboe   [PATCH] Update cf...
2172
   */
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2173
  static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2174
  {
a36e71f99   Jens Axboe   cfq-iosched: add ...
2175
  	struct cfq_queue *cfqq, *new_cfqq = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2176

22e2c507c   Jens Axboe   [PATCH] Update cf...
2177
2178
2179
  	cfqq = cfqd->active_queue;
  	if (!cfqq)
  		goto new_queue;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2180

f04a64246   Vivek Goyal   blkio: Keep queue...
2181
2182
  	if (!cfqd->rq_queued)
  		return NULL;
c244bb50a   Vivek Goyal   cfq-iosched: Get ...
2183
2184
2185
2186
2187
2188
  
  	/*
  	 * We were waiting for group to get backlogged. Expire the queue
  	 */
  	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
  		goto expire;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2189
  	/*
6d048f531   Jens Axboe   cfq-iosched: deve...
2190
  	 * The active queue has run out of time, expire it and select new.
22e2c507c   Jens Axboe   [PATCH] Update cf...
2191
  	 */
7667aa063   Vivek Goyal   cfq-iosched: Take...
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
  	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
  		/*
  		 * If slice had not expired at the completion of last request
  		 * we might not have turned on wait_busy flag. Don't expire
  		 * the queue yet. Allow the group to get backlogged.
  		 *
  		 * The very fact that we have used the slice means we have
  		 * been idling all along on this queue, so it should be ok
  		 * to wait for this request to complete.
  		 */
82bbbf28d   Vivek Goyal   Fix a CFQ crash i...
2202
2203
2204
  		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
  		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
  			cfqq = NULL;
7667aa063   Vivek Goyal   cfq-iosched: Take...
2205
  			goto keep_queue;
82bbbf28d   Vivek Goyal   Fix a CFQ crash i...
2206
  		} else
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
2207
  			goto check_group_idle;
7667aa063   Vivek Goyal   cfq-iosched: Take...
2208
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2209

22e2c507c   Jens Axboe   [PATCH] Update cf...
2210
  	/*
6d048f531   Jens Axboe   cfq-iosched: deve...
2211
2212
  	 * The active queue has requests and isn't expired, allow it to
  	 * dispatch.
22e2c507c   Jens Axboe   [PATCH] Update cf...
2213
  	 */
dd67d0515   Jens Axboe   [PATCH] rbtree: s...
2214
  	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507c   Jens Axboe   [PATCH] Update cf...
2215
  		goto keep_queue;
6d048f531   Jens Axboe   cfq-iosched: deve...
2216
2217
  
  	/*
a36e71f99   Jens Axboe   cfq-iosched: add ...
2218
2219
2220
  	 * If another queue has a request waiting within our mean seek
  	 * distance, let it run.  The expire code will check for close
  	 * cooperators and put the close queue at the front of the service
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2221
  	 * tree.  If possible, merge the expiring queue with the new cfqq.
a36e71f99   Jens Axboe   cfq-iosched: add ...
2222
  	 */
b3b6d0408   Jeff Moyer   cfq: change the m...
2223
  	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2224
2225
2226
  	if (new_cfqq) {
  		if (!cfqq->new_cfqq)
  			cfq_setup_merge(cfqq, new_cfqq);
a36e71f99   Jens Axboe   cfq-iosched: add ...
2227
  		goto expire;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2228
  	}
a36e71f99   Jens Axboe   cfq-iosched: add ...
2229
2230
  
  	/*
6d048f531   Jens Axboe   cfq-iosched: deve...
2231
2232
2233
2234
  	 * No requests pending. If the active queue still has requests in
  	 * flight or is idling for a new request, allow either of these
  	 * conditions to happen (or time out) before selecting a new queue.
  	 */
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
2235
2236
2237
2238
  	if (timer_pending(&cfqd->idle_slice_timer)) {
  		cfqq = NULL;
  		goto keep_queue;
  	}
8e1ac6655   Shaohua Li   cfq-iosched: don'...
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
  	/*
  	 * This is a deep seek queue, but the device is much faster than
  	 * the queue can deliver; don't idle.
  	 */
  	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
  	    (cfq_cfqq_slice_new(cfqq) ||
  	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
  		cfq_clear_cfqq_deep(cfqq);
  		cfq_clear_cfqq_idle_window(cfqq);
  	}
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
  	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
  		cfqq = NULL;
  		goto keep_queue;
  	}
  
  	/*
  	 * If group idle is enabled and there are requests dispatched from
  	 * this group, wait for requests to complete.
  	 */
  check_group_idle:
7700fc4f6   Shaohua Li   CFQ: add think ti...
2259
2260
2261
  	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
  	    cfqq->cfqg->dispatched &&
  	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2262
2263
  		cfqq = NULL;
  		goto keep_queue;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2264
  	}
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2265
  expire:
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2266
  	cfq_slice_expired(cfqd, 0);
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2267
  new_queue:
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2268
2269
2270
2271
2272
  	/*
  	 * Current queue expired. Check if we have to switch to a new
  	 * service tree
  	 */
  	if (!new_cfqq)
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2273
  		cfq_choose_cfqg(cfqd);
718eee057   Corrado Zoccolo   cfq-iosched: fair...
2274

a36e71f99   Jens Axboe   cfq-iosched: add ...
2275
  	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2276
  keep_queue:
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2277
  	return cfqq;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2278
  }
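  /*
   * Editor's summary (not in the original source) of the selection order
   * above: a wait_busy queue that got backlogged is expired at once; a
   * queue whose slice is used up is expired, unless it is the lone queue
   * of its group with IO still in flight, in which case we keep waiting;
   * a queue with queued requests keeps running; otherwise we try to merge
   * with a close cooperator, keep idling on the queue or the group where
   * that pays off, and finally expire the slice and pick a new group and
   * queue via cfq_choose_cfqg() and cfq_set_active_queue().
   */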
febffd618   Jens Axboe   cfq-iosched: kill...
2279
  static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2280
2281
2282
2283
2284
2285
2286
2287
2288
  {
  	int dispatched = 0;
  
  	while (cfqq->next_rq) {
  		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
  		dispatched++;
  	}
  
  	BUG_ON(!list_empty(&cfqq->fifo));
f04a64246   Vivek Goyal   blkio: Keep queue...
2289
2290
  
  	/* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2291
  	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2292
2293
  	return dispatched;
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2294
2295
2296
2297
  /*
   * Drain our current requests. Used for barriers and when switching
   * io schedulers on-the-fly.
   */
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2298
  static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2299
  {
0871714e0   Jens Axboe   cfq-iosched: rela...
2300
  	struct cfq_queue *cfqq;
d9e7620e6   Jens Axboe   cfq-iosched: rewo...
2301
  	int dispatched = 0;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2302

3440c49f5   Divyesh Shah   cfq-iosched: Fix ...
2303
  	/* Expire the timeslice of the current active queue first */
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2304
  	cfq_slice_expired(cfqd, 0);
3440c49f5   Divyesh Shah   cfq-iosched: Fix ...
2305
2306
  	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
  		__cfq_set_active_queue(cfqd, cfqq);
f04a64246   Vivek Goyal   blkio: Keep queue...
2307
  		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f5   Divyesh Shah   cfq-iosched: Fix ...
2308
  	}
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2309

1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2310
  	BUG_ON(cfqd->busy_queues);
6923715ae   Jeff Moyer   cfq: remove extra...
2311
  	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1f   Tejun Heo   [BLOCK] cfq-iosch...
2312
2313
  	return dispatched;
  }
abc3c744d   Shaohua Li   cfq-iosched: quan...
2314
2315
2316
2317
2318
  static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
  	struct cfq_queue *cfqq)
  {
  	/* the queue hasn't finished any request, can't estimate */
  	if (cfq_cfqq_slice_new(cfqq))
c1e44756f   Shaohua Li   cfq-iosched: do c...
2319
  		return true;
abc3c744d   Shaohua Li   cfq-iosched: quan...
2320
2321
  	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
  		cfqq->slice_end))
c1e44756f   Shaohua Li   cfq-iosched: do c...
2322
  		return true;
abc3c744d   Shaohua Li   cfq-iosched: quan...
2323

c1e44756f   Shaohua Li   cfq-iosched: do c...
2324
  	return false;
abc3c744d   Shaohua Li   cfq-iosched: quan...
2325
  }
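  /*
   * Editor's note (illustrative): the check above assumes each request
   * already dispatched will keep the device busy for about cfq_slice_idle.
   * With the default cfq_slice_idle of HZ/125 (~8ms) and 3 requests in
   * flight, the slice counts as "used soon" once fewer than ~24ms remain.
   */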
0b182d617   Jens Axboe   cfq-iosched: abst...
2326
  static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2327
  {
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2328
  	unsigned int max_dispatch;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2329

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2330
  	/*
5ad531db6   Jens Axboe   cfq-iosched: drai...
2331
2332
  	 * Drain async requests before we start sync IO
  	 */
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
2333
  	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d617   Jens Axboe   cfq-iosched: abst...
2334
  		return false;
5ad531db6   Jens Axboe   cfq-iosched: drai...
2335
2336
  
  	/*
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2337
2338
  	 * If this is an async queue and we have sync IO in flight, let it wait
  	 */
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
2339
  	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d617   Jens Axboe   cfq-iosched: abst...
2340
  		return false;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2341

abc3c744d   Shaohua Li   cfq-iosched: quan...
2342
  	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2343
2344
  	if (cfq_class_idle(cfqq))
  		max_dispatch = 1;
b4878f245   Jens Axboe   [PATCH] 02/05: up...
2345

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2346
2347
2348
2349
  	/*
  	 * Does this cfqq already have too much IO in flight?
  	 */
  	if (cfqq->dispatched >= max_dispatch) {
ef8a41df8   Shaohua Li   cfq-iosched: give...
2350
  		bool promote_sync = false;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2351
2352
2353
  		/*
  		 * idle queue must always only have a single IO in flight
  		 */
3ed9a2965   Jens Axboe   cfq-iosched: impr...
2354
  		if (cfq_class_idle(cfqq))
0b182d617   Jens Axboe   cfq-iosched: abst...
2355
  			return false;
3ed9a2965   Jens Axboe   cfq-iosched: impr...
2356

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2357
  		/*
c4ade94fc   Li, Shaohua   cfq-iosched: remo...
2358
2359
  		 * If there is only one sync queue,
  		 * we can ignore the async queue here and give the sync
ef8a41df8   Shaohua Li   cfq-iosched: give...
2360
2361
2362
2363
  		 * queue no dispatch limit. The reason is that a sync queue
  		 * can preempt an async queue, so limiting the sync queue
  		 * doesn't make sense. This is useful for the aiostress test.
  		 */
c4ade94fc   Li, Shaohua   cfq-iosched: remo...
2364
2365
  		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
  			promote_sync = true;
ef8a41df8   Shaohua Li   cfq-iosched: give...
2366
2367
  
  		/*
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2368
2369
  		 * We have other queues, don't allow more IO from this one
  		 */
ef8a41df8   Shaohua Li   cfq-iosched: give...
2370
2371
  		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
  				!promote_sync)
0b182d617   Jens Axboe   cfq-iosched: abst...
2372
  			return false;
9ede209e8   Jens Axboe   cfq-iosched: impr...
2373

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2374
  		/*
474b18ccc   Shaohua Li   cfq-iosched: no d...
2375
  		 * Sole queue user, no limit
365722bb9   Vivek Goyal   cfq-iosched: dela...
2376
  		 */
ef8a41df8   Shaohua Li   cfq-iosched: give...
2377
  		if (cfqd->busy_queues == 1 || promote_sync)
abc3c744d   Shaohua Li   cfq-iosched: quan...
2378
2379
2380
2381
2382
2383
2384
2385
2386
  			max_dispatch = -1;
  		else
  			/*
  			 * Normally we start throttling cfqq when cfq_quantum/2
  			 * requests have been dispatched. But we can drive
  			 * deeper queue depths at the beginning of the slice,
  			 * subject to the upper limit of cfq_quantum.
  			 */
  			max_dispatch = cfqd->cfq_quantum;
8e2967555   Jens Axboe   cfq-iosched: impl...
2387
2388
2389
2390
2391
2392
2393
  	}
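  	/*
  	 * Editor's note (illustrative): with the default cfq_quantum of 8,
  	 * throttling starts at max(8/2, 1) = 4 dispatched requests. While
  	 * other queues are busy but the slice is not about to be used up,
  	 * the depth may grow to cfq_quantum = 8; the sole-queue or
  	 * promoted-sync case sets max_dispatch to (unsigned)-1, i.e.
  	 * effectively no limit at all.
  	 */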
  
  	/*
  	 * Async queues must wait a bit before being allowed to dispatch.
  	 * We also ramp up the dispatch depth gradually for async IO,
  	 * based on the last sync IO we serviced
  	 */
963b72fc6   Jens Axboe   cfq-iosched: rena...
2394
  	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
573412b29   Corrado Zoccolo   cfq-iosched: redu...
2395
  		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
8e2967555   Jens Axboe   cfq-iosched: impl...
2396
  		unsigned int depth;
365722bb9   Vivek Goyal   cfq-iosched: dela...
2397

61f0c1dca   Jens Axboe   cfq-iosched: use ...
2398
  		depth = last_sync / cfqd->cfq_slice[1];
e00c54c36   Jens Axboe   cfq-iosched: don'...
2399
2400
  		if (!depth && !cfqq->dispatched)
  			depth = 1;
8e2967555   Jens Axboe   cfq-iosched: impl...
2401
2402
  		if (depth < max_dispatch)
  			max_dispatch = depth;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2403
  	}
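  	/*
  	 * Editor's note (illustrative): the async depth above is derived
  	 * from the time since the last delayed sync completion. With the
  	 * default cfq_slice[1] (cfq_slice_sync = HZ/10, ~100ms) an async
  	 * queue earns one unit of dispatch depth per 100ms of sync
  	 * inactivity; right after sync IO the depth computes to 0 and a
  	 * single request is allowed only if nothing is already in flight.
  	 */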
3ed9a2965   Jens Axboe   cfq-iosched: impr...
2404

0b182d617   Jens Axboe   cfq-iosched: abst...
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
  	/*
  	 * If we're below the current max, allow a dispatch
  	 */
  	return cfqq->dispatched < max_dispatch;
  }
  
  /*
   * Dispatch a request from cfqq, moving them to the request queue
   * dispatch list.
   */
  static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	struct request *rq;
  
  	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
  
  	if (!cfq_may_dispatch(cfqd, cfqq))
  		return false;
  
  	/*
  	 * follow expired path, else get first next available
  	 */
  	rq = cfq_check_fifo(cfqq);
  	if (!rq)
  		rq = cfqq->next_rq;
  
  	/*
  	 * insert request into driver dispatch list
  	 */
  	cfq_dispatch_insert(cfqd->queue, rq);
  
  	if (!cfqd->active_cic) {
  		struct cfq_io_context *cic = RQ_CIC(rq);
  
  		atomic_long_inc(&cic->ioc->refcount);
  		cfqd->active_cic = cic;
  	}
  
  	return true;
  }
  
  /*
   * Find the cfqq that we need to service and move a request from that to the
   * dispatch list
   */
  static int cfq_dispatch_requests(struct request_queue *q, int force)
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	struct cfq_queue *cfqq;
  
  	if (!cfqd->busy_queues)
  		return 0;
  
  	if (unlikely(force))
  		return cfq_forced_dispatch(cfqd);
  
  	cfqq = cfq_select_queue(cfqd);
  	if (!cfqq)
8e2967555   Jens Axboe   cfq-iosched: impl...
2463
  		return 0;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2464
  	/*
0b182d617   Jens Axboe   cfq-iosched: abst...
2465
  	 * Dispatch a request from this cfqq, if it is allowed
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2466
  	 */
0b182d617   Jens Axboe   cfq-iosched: abst...
2467
2468
  	if (!cfq_dispatch_request(cfqd, cfqq))
  		return 0;
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2469
  	cfqq->slice_dispatch++;
b029195dd   Jens Axboe   cfq-iosched: don'...
2470
  	cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2471

2f5cb7381   Jens Axboe   cfq-iosched: chan...
2472
2473
2474
2475
2476
2477
2478
2479
  	/*
  	 * expire an async queue immediately if it has used up its slice. An
  	 * idle queue always expires after 1 dispatch round.
  	 */
  	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
  	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
  	    cfq_class_idle(cfqq))) {
  		cfqq->slice_end = jiffies + 1;
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2480
  		cfq_slice_expired(cfqd, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2481
  	}
b217a903a   Shan Wei   cfq: fix the log ...
2482
  	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb7381   Jens Axboe   cfq-iosched: chan...
2483
  	return 1;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2484
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2485
  /*
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
2486
2487
   * task holds one reference to the queue, dropped when task exits. each rq
   * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2488
   *
b1c357696   Vivek Goyal   blkio: Take care ...
2489
   * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2490
2491
2492
2493
   * queue lock must be held here.
   */
  static void cfq_put_queue(struct cfq_queue *cfqq)
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
2494
  	struct cfq_data *cfqd = cfqq->cfqd;
0bbfeb832   Justin TerAvest   cfq-iosched: Alwa...
2495
  	struct cfq_group *cfqg;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2496

30d7b9448   Shaohua Li   block cfq: don't ...
2497
  	BUG_ON(cfqq->ref <= 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2498

30d7b9448   Shaohua Li   block cfq: don't ...
2499
2500
  	cfqq->ref--;
  	if (cfqq->ref)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2501
  		return;
7b679138b   Jens Axboe   cfq-iosched: add ...
2502
  	cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2503
  	BUG_ON(rb_first(&cfqq->sort_list));
22e2c507c   Jens Axboe   [PATCH] Update cf...
2504
  	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c357696   Vivek Goyal   blkio: Take care ...
2505
  	cfqg = cfqq->cfqg;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2506

28f95cbc3   Jens Axboe   cfq-iosched: remo...
2507
  	if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
2508
  		__cfq_slice_expired(cfqd, cfqq, 0);
23e018a1b   Jens Axboe   block: get rid of...
2509
  		cfq_schedule_dispatch(cfqd);
28f95cbc3   Jens Axboe   cfq-iosched: remo...
2510
  	}
22e2c507c   Jens Axboe   [PATCH] Update cf...
2511

f04a64246   Vivek Goyal   blkio: Keep queue...
2512
  	BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2513
  	kmem_cache_free(cfq_pool, cfqq);
b1c357696   Vivek Goyal   blkio: Take care ...
2514
  	cfq_put_cfqg(cfqg);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2515
  }
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2516
  /*
5f45c6958   Jens Axboe   cfq-iosched: read...
2517
   * Call func for each cic attached to this ioc.
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2518
   */
07416d29b   Jens Axboe   cfq-iosched: fix ...
2519
  static void
5f45c6958   Jens Axboe   cfq-iosched: read...
2520
2521
  call_for_each_cic(struct io_context *ioc,
  		  void (*func)(struct io_context *, struct cfq_io_context *))
07416d29b   Jens Axboe   cfq-iosched: fix ...
2522
2523
2524
  {
  	struct cfq_io_context *cic;
  	struct hlist_node *n;
5f45c6958   Jens Axboe   cfq-iosched: read...
2525
  	rcu_read_lock();
07416d29b   Jens Axboe   cfq-iosched: fix ...
2526
2527
  	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
  		func(ioc, cic);
07416d29b   Jens Axboe   cfq-iosched: fix ...
2528

4ac845a2e   Jens Axboe   block: cfq: make ...
2529
  	rcu_read_unlock();
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2530
2531
2532
2533
2534
2535
2536
2537
2538
  }
  
  static void cfq_cic_free_rcu(struct rcu_head *head)
  {
  	struct cfq_io_context *cic;
  
  	cic = container_of(head, struct cfq_io_context, rcu_head);
  
  	kmem_cache_free(cfq_ioc_pool, cic);
245b2e70e   Tejun Heo   percpu: clean up ...
2539
  	elv_ioc_count_dec(cfq_ioc_count);
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2540

9a11b4ed0   Jens Axboe   cfq-iosched: prop...
2541
2542
2543
2544
2545
2546
2547
  	if (ioc_gone) {
  		/*
  		 * CFQ scheduler is exiting, grab exit lock and check
  		 * the pending io context count. If it hits zero,
  		 * complete ioc_gone and set it back to NULL
  		 */
  		spin_lock(&ioc_gone_lock);
245b2e70e   Tejun Heo   percpu: clean up ...
2548
  		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
9a11b4ed0   Jens Axboe   cfq-iosched: prop...
2549
2550
2551
2552
2553
  			complete(ioc_gone);
  			ioc_gone = NULL;
  		}
  		spin_unlock(&ioc_gone_lock);
  	}
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2554
  }
4ac845a2e   Jens Axboe   block: cfq: make ...
2555

34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2556
2557
2558
  static void cfq_cic_free(struct cfq_io_context *cic)
  {
  	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
4ac845a2e   Jens Axboe   block: cfq: make ...
2559
2560
2561
2562
2563
  }
  
  static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
  {
  	unsigned long flags;
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2564
  	unsigned long dead_key = (unsigned long) cic->key;
4ac845a2e   Jens Axboe   block: cfq: make ...
2565

bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2566
  	BUG_ON(!(dead_key & CIC_DEAD_KEY));
4ac845a2e   Jens Axboe   block: cfq: make ...
2567
2568
  
  	spin_lock_irqsave(&ioc->lock, flags);
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2569
  	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2570
  	hlist_del_rcu(&cic->cic_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2571
  	spin_unlock_irqrestore(&ioc->lock, flags);
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2572
  	cfq_cic_free(cic);
4ac845a2e   Jens Axboe   block: cfq: make ...
2573
  }
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2574
2575
2576
2577
2578
  /*
   * Must be called with rcu_read_lock() held or preemption otherwise disabled.
   * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
   * and ->trim() which is called with the task lock held
   */
4ac845a2e   Jens Axboe   block: cfq: make ...
2579
2580
  static void cfq_free_io_context(struct io_context *ioc)
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2581
  	/*
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
2582
2583
2584
2585
  	 * ioc->refcount is zero here, or we are called from elv_unregister(),
  	 * so no more cic's are allowed to be linked into this ioc.  So it
  	 * should be ok to iterate over the known list, we will see all cic's
  	 * since no new ones are added.
4ac845a2e   Jens Axboe   block: cfq: make ...
2586
  	 */
5f45c6958   Jens Axboe   cfq-iosched: read...
2587
  	call_for_each_cic(ioc, cic_free_func);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2588
  }
d02a2c077   Shaohua Li   cfq-iosched: fix ...
2589
  static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2590
  {
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2591
  	struct cfq_queue *__cfqq, *next;
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
  	/*
  	 * If this queue was scheduled to merge with another queue, be
  	 * sure to drop the reference taken on that queue (and others in
  	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
  	 */
  	__cfqq = cfqq->new_cfqq;
  	while (__cfqq) {
  		if (__cfqq == cfqq) {
  			WARN(1, "cfqq->new_cfqq loop detected
  ");
  			break;
  		}
  		next = __cfqq->new_cfqq;
  		cfq_put_queue(__cfqq);
  		__cfqq = next;
  	}
d02a2c077   Shaohua Li   cfq-iosched: fix ...
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
  }
  
  static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	if (unlikely(cfqq == cfqd->active_queue)) {
  		__cfq_slice_expired(cfqd, cfqq, 0);
  		cfq_schedule_dispatch(cfqd);
  	}
  
  	cfq_put_cooperator(cfqq);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
2618

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2619
2620
  	cfq_put_queue(cfqq);
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
2621

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2622
2623
2624
  static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
  					 struct cfq_io_context *cic)
  {
4faa3c815   Fabio Checconi   cfq-iosched: do n...
2625
  	struct io_context *ioc = cic->ioc;
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2626
  	list_del_init(&cic->queue_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2627
2628
  
  	/*
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2629
  	 * Make sure dead mark is seen for dead queues
4ac845a2e   Jens Axboe   block: cfq: make ...
2630
  	 */
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2631
  	smp_wmb();
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2632
  	cic->key = cfqd_dead_key(cfqd);
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2633

3181faa85   Shaohua Li   cfq-iosched: fix ...
2634
  	rcu_read_lock();
9b50902db   Jens Axboe   cfq-iosched: fix ...
2635
  	if (rcu_dereference(ioc->ioc_data) == cic) {
3181faa85   Shaohua Li   cfq-iosched: fix ...
2636
  		rcu_read_unlock();
9b50902db   Jens Axboe   cfq-iosched: fix ...
2637
  		spin_lock(&ioc->lock);
4faa3c815   Fabio Checconi   cfq-iosched: do n...
2638
  		rcu_assign_pointer(ioc->ioc_data, NULL);
9b50902db   Jens Axboe   cfq-iosched: fix ...
2639
  		spin_unlock(&ioc->lock);
3181faa85   Shaohua Li   cfq-iosched: fix ...
2640
2641
  	} else
  		rcu_read_unlock();
4faa3c815   Fabio Checconi   cfq-iosched: do n...
2642

ff6657c6c   Jens Axboe   cfq-iosched: get ...
2643
2644
2645
  	if (cic->cfqq[BLK_RW_ASYNC]) {
  		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
  		cic->cfqq[BLK_RW_ASYNC] = NULL;
12a057321   Al Viro   [PATCH] keep sync...
2646
  	}
ff6657c6c   Jens Axboe   cfq-iosched: get ...
2647
2648
2649
  	if (cic->cfqq[BLK_RW_SYNC]) {
  		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
  		cic->cfqq[BLK_RW_SYNC] = NULL;
12a057321   Al Viro   [PATCH] keep sync...
2650
  	}
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2651
  }
4ac845a2e   Jens Axboe   block: cfq: make ...
2652
2653
  static void cfq_exit_single_io_context(struct io_context *ioc,
  				       struct cfq_io_context *cic)
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2654
  {
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2655
  	struct cfq_data *cfqd = cic_to_cfqd(cic);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2656

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2657
  	if (cfqd) {
165125e1e   Jens Axboe   [BLOCK] Get rid o...
2658
  		struct request_queue *q = cfqd->queue;
4ac845a2e   Jens Axboe   block: cfq: make ...
2659
  		unsigned long flags;
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2660

4ac845a2e   Jens Axboe   block: cfq: make ...
2661
  		spin_lock_irqsave(q->queue_lock, flags);
62c1fe9d9   Jens Axboe   cfq-iosched: fix ...
2662
2663
2664
2665
2666
2667
  
  		/*
  		 * Ensure we get a fresh copy of the ->key to prevent
  		 * race between exiting task and queue
  		 */
  		smp_read_barrier_depends();
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2668
  		if (cic->key == cfqd)
62c1fe9d9   Jens Axboe   cfq-iosched: fix ...
2669
  			__cfq_exit_single_io_context(cfqd, cic);
4ac845a2e   Jens Axboe   block: cfq: make ...
2670
  		spin_unlock_irqrestore(q->queue_lock, flags);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
2671
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2672
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2673
2674
2675
2676
  /*
   * The process that ioc belongs to has exited; we need to clean up
   * and put the internal structures we have that belong to that process.
   */
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2677
  static void cfq_exit_io_context(struct io_context *ioc)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2678
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2679
  	call_for_each_cic(ioc, cfq_exit_single_io_context);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2680
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
2681
  static struct cfq_io_context *
8267e268e   Al Viro   [PATCH] gfp_t: bl...
2682
  cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2683
  {
b5deef901   Jens Axboe   [PATCH] Make sure...
2684
  	struct cfq_io_context *cic;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2685

94f6030ca   Christoph Lameter   Slab allocators: ...
2686
2687
  	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
  							cfqd->queue->node);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2688
  	if (cic) {
383cd7213   Shaohua Li   CFQ: move think t...
2689
  		cic->ttime.last_end_request = jiffies;
553698f94   Jens Axboe   [PATCH] cfq-iosch...
2690
  		INIT_LIST_HEAD(&cic->queue_list);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2691
  		INIT_HLIST_NODE(&cic->cic_list);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2692
2693
  		cic->dtor = cfq_free_io_context;
  		cic->exit = cfq_exit_io_context;
245b2e70e   Tejun Heo   percpu: clean up ...
2694
  		elv_ioc_count_inc(cfq_ioc_count);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2695
2696
2697
2698
  	}
  
  	return cic;
  }
fd0928df9   Jens Axboe   ioprio: move io p...
2699
  static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2700
2701
2702
  {
  	struct task_struct *tsk = current;
  	int ioprio_class;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2703
  	if (!cfq_cfqq_prio_changed(cfqq))
22e2c507c   Jens Axboe   [PATCH] Update cf...
2704
  		return;
fd0928df9   Jens Axboe   ioprio: move io p...
2705
  	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2706
  	switch (ioprio_class) {
fe094d98e   Jens Axboe   cfq-iosched: make...
2707
2708
2709
2710
2711
  	default:
  		printk(KERN_ERR "cfq: bad prio %x
  ", ioprio_class);
  	case IOPRIO_CLASS_NONE:
  		/*
6d63c2755   Jens Axboe   cfq-iosched: make...
2712
  		 * no prio set, inherit CPU scheduling settings
fe094d98e   Jens Axboe   cfq-iosched: make...
2713
2714
  		 */
  		cfqq->ioprio = task_nice_ioprio(tsk);
6d63c2755   Jens Axboe   cfq-iosched: make...
2715
  		cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98e   Jens Axboe   cfq-iosched: make...
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
  		break;
  	case IOPRIO_CLASS_RT:
  		cfqq->ioprio = task_ioprio(ioc);
  		cfqq->ioprio_class = IOPRIO_CLASS_RT;
  		break;
  	case IOPRIO_CLASS_BE:
  		cfqq->ioprio = task_ioprio(ioc);
  		cfqq->ioprio_class = IOPRIO_CLASS_BE;
  		break;
  	case IOPRIO_CLASS_IDLE:
  		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
  		cfqq->ioprio = 7;
  		cfq_clear_cfqq_idle_window(cfqq);
  		break;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2730
2731
2732
2733
2734
2735
2736
  	}
  
  	/*
  	 * keep track of original prio settings in case we have to temporarily
  	 * elevate the priority of this queue
  	 */
  	cfqq->org_ioprio = cfqq->ioprio;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
2737
  	cfq_clear_cfqq_prio_changed(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2738
  }
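  /*
   * Editor's note (illustrative, based on the ioprio helpers defined
   * outside this file): a task with no explicit io priority falls into
   * IOPRIO_CLASS_NONE and inherits from its CPU scheduling parameters --
   * task_nice_ioprio() maps nice levels onto the 0..7 range (nice 0 lands
   * on priority 4) and task_nice_ioclass() picks RT, BE or IDLE from the
   * scheduling policy. Idle-class queues also get their idle window
   * cleared, since idling on them would only waste disk time.
   */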
febffd618   Jens Axboe   cfq-iosched: kill...
2739
  static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2740
  {
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2741
  	struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0e   Al Viro   [PATCH] switch to...
2742
  	struct cfq_queue *cfqq;
c1b707d25   Jens Axboe   [PATCH] CFQ: bad ...
2743
  	unsigned long flags;
35e6077cb   Jens Axboe   [PATCH] cfq-iosch...
2744

caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2745
2746
  	if (unlikely(!cfqd))
  		return;
c1b707d25   Jens Axboe   [PATCH] CFQ: bad ...
2747
  	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2748

ff6657c6c   Jens Axboe   cfq-iosched: get ...
2749
  	cfqq = cic->cfqq[BLK_RW_ASYNC];
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2750
2751
  	if (cfqq) {
  		struct cfq_queue *new_cfqq;
ff6657c6c   Jens Axboe   cfq-iosched: get ...
2752
2753
  		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
  						GFP_ATOMIC);
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2754
  		if (new_cfqq) {
ff6657c6c   Jens Axboe   cfq-iosched: get ...
2755
  			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2756
2757
  			cfq_put_queue(cfqq);
  		}
22e2c507c   Jens Axboe   [PATCH] Update cf...
2758
  	}
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2759

ff6657c6c   Jens Axboe   cfq-iosched: get ...
2760
  	cfqq = cic->cfqq[BLK_RW_SYNC];
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
2761
2762
  	if (cfqq)
  		cfq_mark_cfqq_prio_changed(cfqq);
c1b707d25   Jens Axboe   [PATCH] CFQ: bad ...
2763
  	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2764
  }
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2765
  static void cfq_ioc_set_ioprio(struct io_context *ioc)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2766
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2767
  	call_for_each_cic(ioc, changed_ioprio);
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2768
  	ioc->ioprio_changed = 0;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2769
  }
d5036d770   Jens Axboe   cfq-iosched: move...
2770
  static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a5   Jens Axboe   cfq-iosched: appl...
2771
  			  pid_t pid, bool is_sync)
d5036d770   Jens Axboe   cfq-iosched: move...
2772
2773
2774
2775
  {
  	RB_CLEAR_NODE(&cfqq->rb_node);
  	RB_CLEAR_NODE(&cfqq->p_node);
  	INIT_LIST_HEAD(&cfqq->fifo);
30d7b9448   Shaohua Li   block cfq: don't ...
2776
  	cfqq->ref = 0;
d5036d770   Jens Axboe   cfq-iosched: move...
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
  	cfqq->cfqd = cfqd;
  
  	cfq_mark_cfqq_prio_changed(cfqq);
  
  	if (is_sync) {
  		if (!cfq_class_idle(cfqq))
  			cfq_mark_cfqq_idle_window(cfqq);
  		cfq_mark_cfqq_sync(cfqq);
  	}
  	cfqq->pid = pid;
  }
24610333d   Vivek Goyal   blkio: Drop the r...
2788
2789
2790
2791
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
  {
  	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2792
  	struct cfq_data *cfqd = cic_to_cfqd(cic);
24610333d   Vivek Goyal   blkio: Drop the r...
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
  	unsigned long flags;
  	struct request_queue *q;
  
  	if (unlikely(!cfqd))
  		return;
  
  	q = cfqd->queue;
  
  	spin_lock_irqsave(q->queue_lock, flags);
  
  	if (sync_cfqq) {
  		/*
  		 * Drop reference to sync queue. A new sync queue will be
  		 * assigned in new group upon arrival of a fresh request.
  		 */
  		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
  		cic_set_cfqq(cic, NULL, 1);
  		cfq_put_queue(sync_cfqq);
  	}
  
  	spin_unlock_irqrestore(q->queue_lock, flags);
  }
  
  static void cfq_ioc_set_cgroup(struct io_context *ioc)
  {
  	call_for_each_cic(ioc, changed_cgroup);
  	ioc->cgroup_changed = 0;
  }
  #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
22e2c507c   Jens Axboe   [PATCH] Update cf...
2822
  static struct cfq_queue *
a6151c3a5   Jens Axboe   cfq-iosched: appl...
2823
  cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
fd0928df9   Jens Axboe   ioprio: move io p...
2824
  		     struct io_context *ioc, gfp_t gfp_mask)
22e2c507c   Jens Axboe   [PATCH] Update cf...
2825
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
2826
  	struct cfq_queue *cfqq, *new_cfqq = NULL;
91fac317a   Vasily Tarasov   cfq-iosched: get ...
2827
  	struct cfq_io_context *cic;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2828
  	struct cfq_group *cfqg;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2829
2830
  
  retry:
3e59cf9d6   Vivek Goyal   cfq-iosched: Get ...
2831
  	cfqg = cfq_get_cfqg(cfqd);
4ac845a2e   Jens Axboe   block: cfq: make ...
2832
  	cic = cfq_cic_lookup(cfqd, ioc);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
2833
2834
  	/* cic always exists here */
  	cfqq = cic_to_cfqq(cic, is_sync);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2835

6118b70b3   Jens Axboe   cfq-iosched: get ...
2836
2837
2838
2839
2840
2841
  	/*
  	 * Always try a new alloc if we fell back to the OOM cfqq
  	 * originally, since it should just be a temporary situation.
  	 */
  	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
  		cfqq = NULL;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2842
2843
2844
2845
2846
  		if (new_cfqq) {
  			cfqq = new_cfqq;
  			new_cfqq = NULL;
  		} else if (gfp_mask & __GFP_WAIT) {
  			spin_unlock_irq(cfqd->queue->queue_lock);
94f6030ca   Christoph Lameter   Slab allocators: ...
2847
  			new_cfqq = kmem_cache_alloc_node(cfq_pool,
6118b70b3   Jens Axboe   cfq-iosched: get ...
2848
  					gfp_mask | __GFP_ZERO,
94f6030ca   Christoph Lameter   Slab allocators: ...
2849
  					cfqd->queue->node);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2850
  			spin_lock_irq(cfqd->queue->queue_lock);
6118b70b3   Jens Axboe   cfq-iosched: get ...
2851
2852
  			if (new_cfqq)
  				goto retry;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2853
  		} else {
94f6030ca   Christoph Lameter   Slab allocators: ...
2854
2855
2856
  			cfqq = kmem_cache_alloc_node(cfq_pool,
  					gfp_mask | __GFP_ZERO,
  					cfqd->queue->node);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2857
  		}
6118b70b3   Jens Axboe   cfq-iosched: get ...
2858
2859
2860
  		if (cfqq) {
  			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
  			cfq_init_prio_data(cfqq, ioc);
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
2861
  			cfq_link_cfqq_cfqg(cfqq, cfqg);
6118b70b3   Jens Axboe   cfq-iosched: get ...
2862
2863
2864
  			cfq_log_cfqq(cfqd, cfqq, "alloced");
  		} else
  			cfqq = &cfqd->oom_cfqq;
22e2c507c   Jens Axboe   [PATCH] Update cf...
2865
2866
2867
2868
  	}
  
  	if (new_cfqq)
  		kmem_cache_free(cfq_pool, new_cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
2869
2870
  	return cfqq;
  }
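  /*
   * Editor's note (not in the original source): the retry loop above is
   * the usual "allocate outside the lock" pattern -- when __GFP_WAIT
   * allows sleeping, the queue_lock is dropped around
   * kmem_cache_alloc_node() and the cic lookup is redone from "retry:" in
   * case another context set the queue up meanwhile. Callers that cannot
   * sleep get an atomic allocation and fall back to the embedded oom_cfqq
   * rather than failing outright.
   */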
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2871
2872
2873
  static struct cfq_queue **
  cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
  {
fe094d98e   Jens Axboe   cfq-iosched: make...
2874
  	switch (ioprio_class) {
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
  	case IOPRIO_CLASS_RT:
  		return &cfqd->async_cfqq[0][ioprio];
  	case IOPRIO_CLASS_BE:
  		return &cfqd->async_cfqq[1][ioprio];
  	case IOPRIO_CLASS_IDLE:
  		return &cfqd->async_idle_cfqq;
  	default:
  		BUG();
  	}
  }
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2885
  static struct cfq_queue *
a6151c3a5   Jens Axboe   cfq-iosched: appl...
2886
  cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2887
2888
  	      gfp_t gfp_mask)
  {
fd0928df9   Jens Axboe   ioprio: move io p...
2889
2890
  	const int ioprio = task_ioprio(ioc);
  	const int ioprio_class = task_ioprio_class(ioc);
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2891
  	struct cfq_queue **async_cfqq = NULL;
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2892
  	struct cfq_queue *cfqq = NULL;
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2893
2894
2895
2896
  	if (!is_sync) {
  		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
  		cfqq = *async_cfqq;
  	}
6118b70b3   Jens Axboe   cfq-iosched: get ...
2897
  	if (!cfqq)
fd0928df9   Jens Axboe   ioprio: move io p...
2898
  		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2899
2900
2901
2902
  
  	/*
  	 * pin the queue now that it's allocated, scheduler exit will prune it
  	 */
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2903
  	if (!is_sync && !(*async_cfqq)) {
30d7b9448   Shaohua Li   block cfq: don't ...
2904
  		cfqq->ref++;
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
2905
  		*async_cfqq = cfqq;
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2906
  	}
30d7b9448   Shaohua Li   block cfq: don't ...
2907
  	cfqq->ref++;
15c31be4d   Jens Axboe   cfq-iosched: fix ...
2908
2909
  	return cfqq;
  }
498d3aa2b   Jens Axboe   [PATCH] cfq-iosch...
2910
2911
2912
  /*
   * We drop cfq io contexts lazily, so we may find a dead one.
   */
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2913
  static void
4ac845a2e   Jens Axboe   block: cfq: make ...
2914
2915
  cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
  		  struct cfq_io_context *cic)
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2916
  {
4ac845a2e   Jens Axboe   block: cfq: make ...
2917
  	unsigned long flags;
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
2918
  	WARN_ON(!list_empty(&cic->queue_list));
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2919
  	BUG_ON(cic->key != cfqd_dead_key(cfqd));
597bc485d   Jens Axboe   cfq-iosched: spee...
2920

4ac845a2e   Jens Axboe   block: cfq: make ...
2921
  	spin_lock_irqsave(&ioc->lock, flags);
726e99ab8   Shaohua Li   cfq-iosched: make...
2922
2923
  	BUG_ON(rcu_dereference_check(ioc->ioc_data,
  		lockdep_is_held(&ioc->lock)) == cic);
597bc485d   Jens Axboe   cfq-iosched: spee...
2924

80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2925
  	radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2926
  	hlist_del_rcu(&cic->cic_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2927
2928
2929
  	spin_unlock_irqrestore(&ioc->lock, flags);
  
  	cfq_cic_free(cic);
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2930
  }
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2931
  static struct cfq_io_context *
4ac845a2e   Jens Axboe   block: cfq: make ...
2932
  cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2933
  {
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2934
  	struct cfq_io_context *cic;
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2935
  	unsigned long flags;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2936

91fac317a   Vasily Tarasov   cfq-iosched: get ...
2937
2938
  	if (unlikely(!ioc))
  		return NULL;
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2939
  	rcu_read_lock();
597bc485d   Jens Axboe   cfq-iosched: spee...
2940
2941
2942
  	/*
  	 * we maintain a last-hit cache, to avoid browsing over the tree
  	 */
4ac845a2e   Jens Axboe   block: cfq: make ...
2943
  	cic = rcu_dereference(ioc->ioc_data);
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2944
2945
  	if (cic && cic->key == cfqd) {
  		rcu_read_unlock();
597bc485d   Jens Axboe   cfq-iosched: spee...
2946
  		return cic;
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2947
  	}
597bc485d   Jens Axboe   cfq-iosched: spee...
2948

4ac845a2e   Jens Axboe   block: cfq: make ...
2949
  	do {
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2950
  		cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
4ac845a2e   Jens Axboe   block: cfq: make ...
2951
2952
2953
  		rcu_read_unlock();
  		if (!cic)
  			break;
bca4b914b   Konstantin Khlebnikov   cfq-iosched: remo...
2954
  		if (unlikely(cic->key != cfqd)) {
4ac845a2e   Jens Axboe   block: cfq: make ...
2955
  			cfq_drop_dead_cic(cfqd, ioc, cic);
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2956
  			rcu_read_lock();
4ac845a2e   Jens Axboe   block: cfq: make ...
2957
  			continue;
dbecf3ab4   OGAWA Hirofumi   [PATCH 2/2] cfq: ...
2958
  		}
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2959

d6de8be71   Jens Axboe   cfq-iosched: fix ...
2960
  		spin_lock_irqsave(&ioc->lock, flags);
4ac845a2e   Jens Axboe   block: cfq: make ...
2961
  		rcu_assign_pointer(ioc->ioc_data, cic);
d6de8be71   Jens Axboe   cfq-iosched: fix ...
2962
  		spin_unlock_irqrestore(&ioc->lock, flags);
4ac845a2e   Jens Axboe   block: cfq: make ...
2963
2964
  		break;
  	} while (1);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2965

4ac845a2e   Jens Axboe   block: cfq: make ...
2966
  	return cic;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2967
  }
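  /*
   * Editor's note (not in the original source): the lookup is two level --
   * ioc->ioc_data acts as a last-hit cache, and on a miss the cic is
   * found in the per-ioc radix tree keyed by cfqd->cic_index. A cic whose
   * key no longer matches the cfqd carries the CIC_DEAD_KEY mark and is
   * dropped lazily here via cfq_drop_dead_cic() before the lookup is
   * retried.
   */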
4ac845a2e   Jens Axboe   block: cfq: make ...
2968
2969
2970
2971
2972
  /*
   * Add cic into ioc, using cfqd as the search key. This enables us to lookup
   * the process specific cfq io context when entered from the block layer.
   * Also adds the cic to a per-cfqd list, used when this queue is removed.
   */
febffd618   Jens Axboe   cfq-iosched: kill...
2973
2974
  static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
  			struct cfq_io_context *cic, gfp_t gfp_mask)
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2975
  {
0261d6886   Jens Axboe   [PATCH] CFQ: use ...
2976
  	unsigned long flags;
4ac845a2e   Jens Axboe   block: cfq: make ...
2977
  	int ret;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2978

4ac845a2e   Jens Axboe   block: cfq: make ...
2979
2980
2981
2982
  	ret = radix_tree_preload(gfp_mask);
  	if (!ret) {
  		cic->ioc = ioc;
  		cic->key = cfqd;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2983

4ac845a2e   Jens Axboe   block: cfq: make ...
2984
2985
  		spin_lock_irqsave(&ioc->lock, flags);
  		ret = radix_tree_insert(&ioc->radix_root,
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
2986
  						cfqd->cic_index, cic);
ffc4e7595   Jens Axboe   cfq-iosched: add ...
2987
2988
  		if (!ret)
  			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
4ac845a2e   Jens Axboe   block: cfq: make ...
2989
  		spin_unlock_irqrestore(&ioc->lock, flags);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2990

4ac845a2e   Jens Axboe   block: cfq: make ...
2991
2992
2993
2994
2995
2996
2997
  		radix_tree_preload_end();
  
  		if (!ret) {
  			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  			list_add(&cic->queue_list, &cfqd->cic_list);
  			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  		}
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
2998
  	}
4ac845a2e   Jens Axboe   block: cfq: make ...
2999
3000
3001
  	if (ret)
  		printk(KERN_ERR "cfq: cic link failed!
  ");
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
3002

4ac845a2e   Jens Axboe   block: cfq: make ...
3003
  	return ret;
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3004
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3005
3006
3007
  /*
   * Setup general io context and cfq io context. There can be several cfq
   * io contexts per general io context, if this process is doing io to more
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3008
   * than one device managed by cfq.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3009
3010
   */
  static struct cfq_io_context *
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3011
  cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3012
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3013
  	struct io_context *ioc = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3014
  	struct cfq_io_context *cic;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3015

22e2c507c   Jens Axboe   [PATCH] Update cf...
3016
  	might_sleep_if(gfp_mask & __GFP_WAIT);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3017

b5deef901   Jens Axboe   [PATCH] Make sure...
3018
  	ioc = get_io_context(gfp_mask, cfqd->queue->node);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3019
3020
  	if (!ioc)
  		return NULL;
4ac845a2e   Jens Axboe   block: cfq: make ...
3021
  	cic = cfq_cic_lookup(cfqd, ioc);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3022
3023
  	if (cic)
  		goto out;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3024

e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3025
3026
3027
  	cic = cfq_alloc_io_context(cfqd, gfp_mask);
  	if (cic == NULL)
  		goto err;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3028

4ac845a2e   Jens Axboe   block: cfq: make ...
3029
3030
  	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
  		goto err_free;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3031
  out:
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
3032
3033
3034
  	smp_read_barrier_depends();
  	if (unlikely(ioc->ioprio_changed))
  		cfq_ioc_set_ioprio(ioc);
24610333d   Vivek Goyal   blkio: Drop the r...
3035
3036
3037
3038
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	if (unlikely(ioc->cgroup_changed))
  		cfq_ioc_set_cgroup(ioc);
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3039
  	return cic;
4ac845a2e   Jens Axboe   block: cfq: make ...
3040
3041
  err_free:
  	cfq_cic_free(cic);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3042
3043
3044
3045
  err:
  	put_io_context(ioc);
  	return NULL;
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
3046
  static void
383cd7213   Shaohua Li   CFQ: move think t...
3047
  __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3048
  {
383cd7213   Shaohua Li   CFQ: move think t...
3049
3050
  	unsigned long elapsed = jiffies - ttime->last_end_request;
  	elapsed = min(elapsed, 2UL * slice_idle);
db3b5848e   Kiyoshi Ueda   When cfq I/O sche...
3051

383cd7213   Shaohua Li   CFQ: move think t...
3052
3053
3054
3055
3056
3057
3058
3059
3060
  	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
  	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
  	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
  }
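  /*
   * Editor's note (illustrative): this is a fixed-point exponentially
   * weighted average with a 7/8 decay, scaled by 256 so that fractional
   * precision survives integer division. ttime_samples converges towards
   * 256, and with a constant think time of E jiffies ttime_total
   * converges towards 256 * E, so ttime_mean = (total + 128) / samples
   * settles at roughly E (the +128 rounds to nearest).
   */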
  
  static void
  cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  	struct cfq_io_context *cic)
  {
f5f2b6ceb   Shaohua Li   CFQ: add think ti...
3061
  	if (cfq_cfqq_sync(cfqq)) {
383cd7213   Shaohua Li   CFQ: move think t...
3062
  		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
f5f2b6ceb   Shaohua Li   CFQ: add think ti...
3063
3064
3065
  		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
  			cfqd->cfq_slice_idle);
  	}
7700fc4f6   Shaohua Li   CFQ: add think ti...
3066
3067
3068
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
  #endif
22e2c507c   Jens Axboe   [PATCH] Update cf...
3069
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3070

206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
3071
  static void
b2c18e1e0   Jeff Moyer   cfq: calculate th...
3072
  cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f531   Jens Axboe   cfq-iosched: deve...
3073
  		       struct request *rq)
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
3074
  {
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
3075
  	sector_t sdist = 0;
41647e7a9   Corrado Zoccolo   cfq-iosched: reth...
3076
  	sector_t n_sec = blk_rq_sectors(rq);
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
3077
3078
3079
3080
3081
3082
  	if (cfqq->last_request_pos) {
  		if (cfqq->last_request_pos < blk_rq_pos(rq))
  			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
  		else
  			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
  	}
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
3083

3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
3084
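  	/*
  	 * seek_history is a 32-bit sliding window with one bit per recent
  	 * request: shift in a 1 when the request looks seeky (a tiny request
  	 * on non-rotational storage, a large seek distance otherwise).
  	 * CFQQ_SEEKY() then checks how many of the last 32 requests were
  	 * seeky.
  	 */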
  	cfqq->seek_history <<= 1;
41647e7a9   Corrado Zoccolo   cfq-iosched: reth...
3085
3086
3087
3088
  	if (blk_queue_nonrot(cfqd->queue))
  		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
  	else
  		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
206dc69b3   Jens Axboe   [BLOCK] cfq-iosch...
3089
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3090

22e2c507c   Jens Axboe   [PATCH] Update cf...
3091
3092
3093
3094
3095
3096
3097
3098
  /*
   * Disable idle window if the process thinks too long or seeks so much that
   * it doesn't matter
   */
  static void
  cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  		       struct cfq_io_context *cic)
  {
7b679138b   Jens Axboe   cfq-iosched: add ...
3099
  	int old_idle, enable_idle;
1be92f2fc   Jens Axboe   cfq-iosched: neve...
3100

0871714e0   Jens Axboe   cfq-iosched: rela...
3101
3102
3103
3104
  	/*
  	 * Don't idle for async or idle io prio class
  	 */
  	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2fc   Jens Axboe   cfq-iosched: neve...
3105
  		return;
c265a7f41   Jens Axboe   cfq-iosched: get ...
3106
  	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3107

76280aff1   Corrado Zoccolo   cfq-iosched: idli...
3108
3109
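  	/*
  	 * A queue with 4 or more requests pending is considered "deep";
  	 * deep queues keep their idle window even if their access pattern
  	 * looks seeky.
  	 */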
  	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
  		cfq_mark_cfqq_deep(cfqq);
749ef9f84   Corrado Zoccolo   cfq: improve fsyn...
3110
3111
3112
  	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
  		enable_idle = 0;
  	else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3dde36dde   Corrado Zoccolo   cfq-iosched: rewo...
3113
  	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507c   Jens Axboe   [PATCH] Update cf...
3114
  		enable_idle = 0;
383cd7213   Shaohua Li   CFQ: move think t...
3115
3116
  	else if (sample_valid(cic->ttime.ttime_samples)) {
  		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3117
3118
3119
  			enable_idle = 0;
  		else
  			enable_idle = 1;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3120
  	}
7b679138b   Jens Axboe   cfq-iosched: add ...
3121
3122
3123
3124
3125
3126
3127
  	if (old_idle != enable_idle) {
  		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
  		if (enable_idle)
  			cfq_mark_cfqq_idle_window(cfqq);
  		else
  			cfq_clear_cfqq_idle_window(cfqq);
  	}
22e2c507c   Jens Axboe   [PATCH] Update cf...
3128
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3129

22e2c507c   Jens Axboe   [PATCH] Update cf...
3130
3131
3132
3133
  /*
   * Check if new_cfqq should preempt the currently active queue. Return false
   * for no (or if we aren't sure); returning true will cause a preempt.
   */
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3134
  static bool
22e2c507c   Jens Axboe   [PATCH] Update cf...
3135
  cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3136
  		   struct request *rq)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3137
  {
6d048f531   Jens Axboe   cfq-iosched: deve...
3138
  	struct cfq_queue *cfqq;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3139

6d048f531   Jens Axboe   cfq-iosched: deve...
3140
3141
  	cfqq = cfqd->active_queue;
  	if (!cfqq)
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3142
  		return false;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3143

6d048f531   Jens Axboe   cfq-iosched: deve...
3144
  	if (cfq_class_idle(new_cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3145
  		return false;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3146
3147
  
  	if (cfq_class_idle(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3148
  		return true;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3149

22e2c507c   Jens Axboe   [PATCH] Update cf...
3150
  	/*
875feb63b   Divyesh Shah   cfq-iosched: Resp...
3151
3152
3153
3154
3155
3156
  	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
  	 */
  	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
  		return false;
  
  	/*
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
3157
3158
3159
  	 * if the new request is sync, but the currently running queue is
  	 * not, let the sync request have priority.
  	 */
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3160
  	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3161
  		return true;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3162

8682e1f15   Vivek Goyal   blkio: Provide so...
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
  	if (new_cfqq->cfqg != cfqq->cfqg)
  		return false;
  
  	if (cfq_slice_used(cfqq))
  		return true;
  
  	/* Allow preemption only if we are idling on sync-noidle tree */
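  	/*
  	 * (the tree then holds just two queues, presumably the active one and
  	 * the preempting one, and the active queue has no requests left in
  	 * its sort_list)
  	 */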
  	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
  	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
  	    new_cfqq->service_tree->count == 2 &&
  	    RB_EMPTY_ROOT(&cfqq->sort_list))
  		return true;
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
3175
  	/*
3a9a3f6cc   Divyesh Shah   cfq-iosched: Allo...
3176
3177
3178
  	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
  	 */
  	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3179
  		return true;
3a9a3f6cc   Divyesh Shah   cfq-iosched: Allo...
3180

d2d59e18a   Shaohua Li   cfq-iosched: sche...
3181
3182
3183
  	/* An idle queue should not be idle now for some reason */
  	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
  		return true;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3184
  	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3185
  		return false;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3186
3187
3188
3189
3190
  
  	/*
  	 * if this request is as-good as one we would expect from the
  	 * current cfqq, let it preempt
  	 */
e9ce335df   Shaohua Li   cfq-iosched: fix ...
3191
  	if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3192
  		return true;
1e3335de0   Jens Axboe   cfq-iosched: impr...
3193

a6151c3a5   Jens Axboe   cfq-iosched: appl...
3194
  	return false;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3195
3196
3197
3198
3199
3200
3201
3202
  }
  
  /*
   * cfqq preempts the active queue. if we allowed preempt with no slice left,
   * let it have half of its nominal slice.
   */
  static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
f8ae6e3eb   Shaohua Li   block cfq: make q...
3203
  	struct cfq_queue *old_cfqq = cfqd->active_queue;
7b679138b   Jens Axboe   cfq-iosched: add ...
3204
  	cfq_log_cfqq(cfqd, cfqq, "preempt");
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
3205
  	cfq_slice_expired(cfqd, 1);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3206

bf5722567   Jens Axboe   [PATCH] cfq-iosch...
3207
  	/*
f8ae6e3eb   Shaohua Li   block cfq: make q...
3208
3209
3210
3211
3212
3213
3214
  	 * If the workload type changes, don't save the slice; otherwise the
  	 * preempt would not take effect
  	 */
  	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
  		cfqq->cfqg->saved_workload_slice = 0;
  
  	/*
bf5722567   Jens Axboe   [PATCH] cfq-iosch...
3215
3216
3217
3218
  	 * Put the new queue at the front of the current list,
  	 * so we know that it will be selected next.
  	 */
  	BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd9   Jens Axboe   cfq-iosched: get ...
3219
3220
  
  	cfq_service_tree_add(cfqd, cfqq, 1);
eda5e0c91   Justin TerAvest   cfq-iosched: Don'...
3221

62a37f6ba   Justin TerAvest   cfq-iosched: Don'...
3222
3223
  	cfqq->slice_end = 0;
  	cfq_mark_cfqq_slice_new(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3224
3225
3226
  }
  
  /*
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3227
   * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507c   Jens Axboe   [PATCH] Update cf...
3228
3229
3230
   * something we should do about it
   */
  static void
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3231
3232
  cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  		struct request *rq)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3233
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3234
  	struct cfq_io_context *cic = RQ_CIC(rq);
12e9fddd6   Jens Axboe   [PATCH] cfq-iosch...
3235

45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3236
  	cfqd->rq_queued++;
374f84ac3   Jens Axboe   [PATCH] cfq-iosch...
3237

383cd7213   Shaohua Li   CFQ: move think t...
3238
  	cfq_update_io_thinktime(cfqd, cfqq, cic);
b2c18e1e0   Jeff Moyer   cfq: calculate th...
3239
  	cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a12   Jens Axboe   [PATCH] cfq-iosch...
3240
  	cfq_update_idle_window(cfqd, cfqq, cic);
b2c18e1e0   Jeff Moyer   cfq: calculate th...
3241
  	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3242
3243
3244
  
  	if (cfqq == cfqd->active_queue) {
  		/*
b029195dd   Jens Axboe   cfq-iosched: don'...
3245
3246
3247
  		 * Remember that we saw a request from this process, but
  		 * don't start queuing just yet. Otherwise we risk seeing lots
  		 * of tiny requests, because we disrupt the normal plugging
d6ceb25e8   Jens Axboe   cfq-iosched: don'...
3248
3249
  		 * and merging. If the request is already larger than a single
  		 * page, let it rip immediately. For that case we assume that
2d8707229   Jens Axboe   cfq-iosched: twea...
3250
3251
3252
  		 * merging is already done. Ditto for a busy system that
  		 * has other work pending, don't risk delaying until the
  		 * idle timer unplug to continue working.
22e2c507c   Jens Axboe   [PATCH] Update cf...
3253
  		 */
d6ceb25e8   Jens Axboe   cfq-iosched: don'...
3254
  		if (cfq_cfqq_wait_request(cfqq)) {
2d8707229   Jens Axboe   cfq-iosched: twea...
3255
3256
  			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
  			    cfqd->busy_queues > 1) {
812df48d1   Divyesh Shah   blkio: Add more d...
3257
  				cfq_del_timer(cfqd, cfqq);
554554f60   Gui Jianfeng   cfq: Remove wait_...
3258
  				cfq_clear_cfqq_wait_request(cfqq);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
3259
  				__blk_run_queue(cfqd->queue);
a11cdaa7a   Divyesh Shah   block: Update to ...
3260
  			} else {
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
3261
  				cfq_blkiocg_update_idle_time_stats(
a11cdaa7a   Divyesh Shah   block: Update to ...
3262
  						&cfqq->cfqg->blkg);
bf7919371   Vivek Goyal   blkio: Set must_d...
3263
  				cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7a   Divyesh Shah   block: Update to ...
3264
  			}
d6ceb25e8   Jens Axboe   cfq-iosched: don'...
3265
  		}
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3266
  	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3267
3268
3269
  		/*
  		 * not the active queue - expire current slice if it is
  		 * idle and has expired its mean thinktime, or this new queue
3a9a3f6cc   Divyesh Shah   cfq-iosched: Allo...
3270
3271
  		 * has some old slice time left and is of higher priority or
  		 * this new queue is RT and the current one is BE
22e2c507c   Jens Axboe   [PATCH] Update cf...
3272
3273
  		 */
  		cfq_preempt_queue(cfqd, cfqq);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
3274
  		__blk_run_queue(cfqd->queue);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3275
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3276
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3277
  static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3278
  {
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3279
  	struct cfq_data *cfqd = q->elevator->elevator_data;
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3280
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3281

7b679138b   Jens Axboe   cfq-iosched: add ...
3282
  	cfq_log_cfqq(cfqd, cfqq, "insert_request");
fd0928df9   Jens Axboe   ioprio: move io p...
3283
  	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3284

30996f40b   Jens Axboe   cfq-iosched: fix ...
3285
  	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3286
  	list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3de   Corrado Zoccolo   cfq-iosched: prep...
3287
  	cfq_add_rq_rb(rq);
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
3288
  	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
cdc1184cf   Divyesh Shah   blkio: Add io_que...
3289
3290
  			&cfqd->serving_group->blkg, rq_data_dir(rq),
  			rq_is_sync(rq));
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3291
  	cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3292
  }
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3293
3294
3295
3296
3297
3298
  /*
   * Update hw_tag based on peak queue depth over 50 samples under
   * sufficient load.
   */
  static void cfq_update_hw_tag(struct cfq_data *cfqd)
  {
1a1238a7d   Shaohua Li   cfq-iosched: impr...
3299
  	struct cfq_queue *cfqq = cfqd->active_queue;
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3300
3301
  	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
  		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08f   Corrado Zoccolo   cfq-iosched: fix ...
3302
3303
3304
  
  	if (cfqd->hw_tag == 1)
  		return;
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3305
3306
  
  	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3307
  	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3308
  		return;
1a1238a7d   Shaohua Li   cfq-iosched: impr...
3309
3310
3311
3312
3313
3314
3315
  	/*
  	 * If the active queue has too few requests and can idle, cfq might not
  	 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
  	 * case
  	 */
  	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
  	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3316
  	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7d   Shaohua Li   cfq-iosched: impr...
3317
  		return;
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3318
3319
  	if (cfqd->hw_tag_samples++ < 50)
  		return;
e459dd08f   Corrado Zoccolo   cfq-iosched: fix ...
3320
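  	/*
  	 * After 50 samples: if the driver ever had CFQ_HW_QUEUE_MIN or more
  	 * requests in flight, assume the device does command queueing.
  	 */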
  	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3321
3322
3323
  		cfqd->hw_tag = 1;
  	else
  		cfqd->hw_tag = 0;
45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3324
  }
7667aa063   Vivek Goyal   cfq-iosched: Take...
3325
3326
3327
  static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  {
  	struct cfq_io_context *cic = cfqd->active_cic;
02a8f01b5   Justin TerAvest   cfq-iosched: Don'...
3328
3329
3330
  	/* If the queue already has requests, don't wait */
  	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
  		return false;
7667aa063   Vivek Goyal   cfq-iosched: Take...
3331
3332
3333
  	/* If there are other queues in the group, don't wait */
  	if (cfqq->cfqg->nr_cfqq > 1)
  		return false;
7700fc4f6   Shaohua Li   CFQ: add think ti...
3334
3335
3336
  	/* the only queue in the group, but think time is big */
  	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
  		return false;
7667aa063   Vivek Goyal   cfq-iosched: Take...
3337
3338
3339
3340
  	if (cfq_slice_used(cfqq))
  		return true;
  
  	/* if slice left is less than think time, wait busy */
383cd7213   Shaohua Li   CFQ: move think t...
3341
3342
  	if (cic && sample_valid(cic->ttime.ttime_samples)
  	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
7667aa063   Vivek Goyal   cfq-iosched: Take...
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
  		return true;
  
  	/*
  	 * If the think time is less than a jiffy, then ttime_mean = 0 and the
  	 * check above will not be true. It might happen that the slice has not
  	 * expired yet
  	 * but will expire soon (4-5 ns) during select_queue(). To cover the
  	 * case where think time is less than a jiffy, mark the queue wait
  	 * busy if only 1 jiffy is left in the slice.
  	 */
  	if (cfqq->slice_end - jiffies == 1)
  		return true;
  
  	return false;
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3357
  static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3358
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3359
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3360
  	struct cfq_data *cfqd = cfqq->cfqd;
5380a101d   Jens Axboe   [PATCH] cfq-iosch...
3361
  	const int sync = rq_is_sync(rq);
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3362
  	unsigned long now;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3363

b4878f245   Jens Axboe   [PATCH] 02/05: up...
3364
  	now = jiffies;
33659ebba   Christoph Hellwig   block: remove wra...
3365
3366
  	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
  		     !!(rq->cmd_flags & REQ_NOIDLE));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3367

45333d5a3   Aaron Carroll   cfq-iosched: fix ...
3368
  	cfq_update_hw_tag(cfqd);
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3369
  	WARN_ON(!cfqd->rq_in_driver);
6d048f531   Jens Axboe   cfq-iosched: deve...
3370
  	WARN_ON(!cfqq->dispatched);
53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3371
  	cfqd->rq_in_driver--;
6d048f531   Jens Axboe   cfq-iosched: deve...
3372
  	cfqq->dispatched--;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3373
  	(RQ_CFQG(rq))->dispatched--;
e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
3374
3375
3376
  	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
  			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
  			rq_data_dir(rq), rq_is_sync(rq));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3377

53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3378
  	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a2965   Jens Axboe   cfq-iosched: impr...
3379

365722bb9   Vivek Goyal   cfq-iosched: dela...
3380
  	if (sync) {
f5f2b6ceb   Shaohua Li   CFQ: add think ti...
3381
  		struct cfq_rb_root *service_tree;
383cd7213   Shaohua Li   CFQ: move think t...
3382
  		RQ_CIC(rq)->ttime.last_end_request = now;
f5f2b6ceb   Shaohua Li   CFQ: add think ti...
3383
3384
3385
3386
3387
3388
3389
  
  		if (cfq_cfqq_on_rr(cfqq))
  			service_tree = cfqq->service_tree;
  		else
  			service_tree = service_tree_for(cfqq->cfqg,
  				cfqq_prio(cfqq), cfqq_type(cfqq));
  		service_tree->ttime.last_end_request = now;
573412b29   Corrado Zoccolo   cfq-iosched: redu...
3390
3391
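  		/*
  		 * Remember when a sync request last overran its fifo deadline;
  		 * the dispatch path uses this to keep async queue depth small
  		 * while sync latency is suffering (see cfq_may_dispatch()).
  		 */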
  		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
  			cfqd->last_delayed_sync = now;
365722bb9   Vivek Goyal   cfq-iosched: dela...
3392
  	}
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
3393

7700fc4f6   Shaohua Li   CFQ: add think ti...
3394
3395
3396
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	cfqq->cfqg->ttime.last_end_request = now;
  #endif
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
3397
3398
3399
3400
3401
  	/*
  	 * If this is the active queue, check if it needs to be expired,
  	 * or if we want to idle in case it has no pending requests.
  	 */
  	if (cfqd->active_queue == cfqq) {
a36e71f99   Jens Axboe   cfq-iosched: add ...
3402
  		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
44f7c1606   Jens Axboe   cfq-iosched: defe...
3403
3404
3405
3406
  		if (cfq_cfqq_slice_new(cfqq)) {
  			cfq_set_prio_slice(cfqd, cfqq);
  			cfq_clear_cfqq_slice_new(cfqq);
  		}
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3407
3408
  
  		/*
7667aa063   Vivek Goyal   cfq-iosched: Take...
3409
3410
  		 * Should we wait for the next request to come in before we expire
  		 * the queue?
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3411
  		 */
7667aa063   Vivek Goyal   cfq-iosched: Take...
3412
  		if (cfq_should_wait_busy(cfqd, cfqq)) {
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3413
3414
3415
3416
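  			/*
  			 * Extend the slice by the idle time (or the group idle
  			 * time when slice_idle is 0) so the queue is not
  			 * expired before its next request can arrive.
  			 */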
  			unsigned long extend_sl = cfqd->cfq_slice_idle;
  			if (!cfqd->cfq_slice_idle)
  				extend_sl = cfqd->cfq_group_idle;
  			cfqq->slice_end = jiffies + extend_sl;
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3417
  			cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737f   Divyesh Shah   cfq-iosched: Add ...
3418
  			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2dc   Vivek Goyal   blkio: Wait for c...
3419
  		}
a36e71f99   Jens Axboe   cfq-iosched: add ...
3420
  		/*
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
3421
3422
3423
3424
3425
3426
  		 * Idling is not enabled on:
  		 * - expired queues
  		 * - idle-priority queues
  		 * - async queues
  		 * - queues with still some requests queued
  		 * - when there is a close cooperator
a36e71f99   Jens Axboe   cfq-iosched: add ...
3427
  		 */
0871714e0   Jens Axboe   cfq-iosched: rela...
3428
  		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
3429
  			cfq_slice_expired(cfqd, 1);
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
3430
3431
  		else if (sync && cfqq_empty &&
  			 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f84   Corrado Zoccolo   cfq: improve fsyn...
3432
  			cfq_arm_slice_timer(cfqd);
8e550632c   Corrado Zoccolo   cfq-iosched: fix ...
3433
  		}
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
3434
  	}
6d048f531   Jens Axboe   cfq-iosched: deve...
3435

53c583d22   Corrado Zoccolo   cfq-iosched: requ...
3436
  	if (!cfqd->rq_in_driver)
23e018a1b   Jens Axboe   block: get rid of...
3437
  		cfq_schedule_dispatch(cfqd);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3438
  }
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
3439
  static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3440
  {
1b379d8da   Jens Axboe   cfq-iosched: get ...
3441
  	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
3442
  		cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3443
  		return ELV_MQUEUE_MUST;
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
3444
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3445

22e2c507c   Jens Axboe   [PATCH] Update cf...
3446
  	return ELV_MQUEUE_MAY;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3447
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3448
  static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3449
3450
3451
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	struct task_struct *tsk = current;
91fac317a   Vasily Tarasov   cfq-iosched: get ...
3452
  	struct cfq_io_context *cic;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3453
3454
3455
3456
3457
3458
3459
3460
  	struct cfq_queue *cfqq;
  
  	/*
  	 * don't force setup of a queue from here, as a call to may_queue
  	 * does not necessarily imply that a request actually will be queued.
  	 * So just look up a possibly existing queue, or return 'may queue'
  	 * if that fails
  	 */
4ac845a2e   Jens Axboe   block: cfq: make ...
3461
  	cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
3462
3463
  	if (!cic)
  		return ELV_MQUEUE_MAY;
b0b78f81a   Jens Axboe   cfq-iosched: use ...
3464
  	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507c   Jens Axboe   [PATCH] Update cf...
3465
  	if (cfqq) {
fd0928df9   Jens Axboe   ioprio: move io p...
3466
  		cfq_init_prio_data(cfqq, cic->ioc);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3467

89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
3468
  		return __cfq_may_queue(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3469
3470
3471
  	}
  
  	return ELV_MQUEUE_MAY;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3472
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3473
3474
3475
  /*
   * queue lock held here
   */
bb37b94c6   Jens Axboe   [BLOCK] Cleanup u...
3476
  static void cfq_put_request(struct request *rq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3477
  {
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3478
  	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3479

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3480
  	if (cfqq) {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3481
  		const int rw = rq_data_dir(rq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3482

22e2c507c   Jens Axboe   [PATCH] Update cf...
3483
3484
  		BUG_ON(!cfqq->allocated[rw]);
  		cfqq->allocated[rw]--;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3485

5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3486
  		put_io_context(RQ_CIC(rq)->ioc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3487

c186794db   Mike Snitzer   block: share requ...
3488
3489
  		rq->elevator_private[0] = NULL;
  		rq->elevator_private[1] = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3490

7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
3491
3492
  		/* Put down rq reference on cfqg */
  		cfq_put_cfqg(RQ_CFQG(rq));
c186794db   Mike Snitzer   block: share requ...
3493
  		rq->elevator_private[2] = NULL;
7f1dc8a2d   Vivek Goyal   blkio: Fix blkio ...
3494

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3495
3496
3497
  		cfq_put_queue(cfqq);
  	}
  }
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
3498
3499
3500
3501
3502
3503
  static struct cfq_queue *
  cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
  		struct cfq_queue *cfqq)
  {
  	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
  	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d0408   Jeff Moyer   cfq: change the m...
3504
  	cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
3505
3506
3507
  	cfq_put_queue(cfqq);
  	return cic_to_cfqq(cic, 1);
  }
e6c5bc737   Jeff Moyer   cfq: break apart ...
3508
3509
3510
3511
3512
3513
3514
3515
  /*
   * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
   * was the last process referring to said cfqq.
   */
  static struct cfq_queue *
  split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
  {
  	if (cfqq_process_refs(cfqq) == 1) {
e6c5bc737   Jeff Moyer   cfq: break apart ...
3516
3517
  		cfqq->pid = current->pid;
  		cfq_clear_cfqq_coop(cfqq);
ae54abed6   Shaohua Li   cfq-iosched: spli...
3518
  		cfq_clear_cfqq_split_coop(cfqq);
e6c5bc737   Jeff Moyer   cfq: break apart ...
3519
3520
3521
3522
  		return cfqq;
  	}
  
  	cic_set_cfqq(cic, NULL, 1);
d02a2c077   Shaohua Li   cfq-iosched: fix ...
3523
3524
  
  	cfq_put_cooperator(cfqq);
e6c5bc737   Jeff Moyer   cfq: break apart ...
3525
3526
3527
  	cfq_put_queue(cfqq);
  	return NULL;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3528
  /*
22e2c507c   Jens Axboe   [PATCH] Update cf...
3529
   * Allocate cfq data structures associated with this request.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3530
   */
22e2c507c   Jens Axboe   [PATCH] Update cf...
3531
  static int
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3532
  cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3533
3534
3535
3536
  {
  	struct cfq_data *cfqd = q->elevator->elevator_data;
  	struct cfq_io_context *cic;
  	const int rw = rq_data_dir(rq);
a6151c3a5   Jens Axboe   cfq-iosched: appl...
3537
  	const bool is_sync = rq_is_sync(rq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3538
  	struct cfq_queue *cfqq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3539
3540
3541
  	unsigned long flags;
  
  	might_sleep_if(gfp_mask & __GFP_WAIT);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3542
  	cic = cfq_get_io_context(cfqd, gfp_mask);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3543

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3544
  	spin_lock_irqsave(q->queue_lock, flags);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3545
3546
  	if (!cic)
  		goto queue_fail;
e6c5bc737   Jeff Moyer   cfq: break apart ...
3547
  new_queue:
91fac317a   Vasily Tarasov   cfq-iosched: get ...
3548
  	cfqq = cic_to_cfqq(cic, is_sync);
32f2e807a   Vivek Goyal   cfq-iosched: rese...
3549
  	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
fd0928df9   Jens Axboe   ioprio: move io p...
3550
  		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
3551
  		cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
3552
3553
  	} else {
  		/*
e6c5bc737   Jeff Moyer   cfq: break apart ...
3554
3555
  		 * If the queue was seeky for too long, break it apart.
  		 */
ae54abed6   Shaohua Li   cfq-iosched: spli...
3556
  		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
e6c5bc737   Jeff Moyer   cfq: break apart ...
3557
3558
3559
3560
3561
3562
3563
  			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
  			cfqq = split_cfqq(cic, cfqq);
  			if (!cfqq)
  				goto new_queue;
  		}
  
  		/*
df5fe3e8e   Jeff Moyer   cfq: merge cooper...
3564
3565
3566
3567
3568
3569
3570
  		 * Check to see if this queue is scheduled to merge with
  		 * another, closely cooperating queue.  The merging of
  		 * queues happens here as it must be done in process context.
  		 * The reference on new_cfqq was taken in merge_cfqqs.
  		 */
  		if (cfqq->new_cfqq)
  			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317a   Vasily Tarasov   cfq-iosched: get ...
3571
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3572
3573
  
  	cfqq->allocated[rw]++;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3574

6fae9c251   Jens Axboe   Merge commit 'v2....
3575
  	cfqq->ref++;
c186794db   Mike Snitzer   block: share requ...
3576
3577
3578
  	rq->elevator_private[0] = cic;
  	rq->elevator_private[1] = cfqq;
  	rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
93803e014   Jens Axboe   cfq-iosched: fix ...
3579
  	spin_unlock_irqrestore(q->queue_lock, flags);
5e7053747   Jens Axboe   [PATCH] cfq-iosch...
3580
  	return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3581

22e2c507c   Jens Axboe   [PATCH] Update cf...
3582
  queue_fail:
23e018a1b   Jens Axboe   block: get rid of...
3583
  	cfq_schedule_dispatch(cfqd);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3584
  	spin_unlock_irqrestore(q->queue_lock, flags);
7b679138b   Jens Axboe   cfq-iosched: add ...
3585
  	cfq_log(cfqd, "set_request fail");
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3586
3587
  	return 1;
  }
65f27f384   David Howells   WorkStruct: Pass ...
3588
  static void cfq_kick_queue(struct work_struct *work)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3589
  {
65f27f384   David Howells   WorkStruct: Pass ...
3590
  	struct cfq_data *cfqd =
23e018a1b   Jens Axboe   block: get rid of...
3591
  		container_of(work, struct cfq_data, unplug_work);
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3592
  	struct request_queue *q = cfqd->queue;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3593

40bb54d19   Jens Axboe   cfq-iosched: no n...
3594
  	spin_lock_irq(q->queue_lock);
24ecfbe27   Christoph Hellwig   block: add blk_ru...
3595
  	__blk_run_queue(cfqd->queue);
40bb54d19   Jens Axboe   cfq-iosched: no n...
3596
  	spin_unlock_irq(q->queue_lock);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
  }
  
  /*
   * Timer running if the active_queue is currently idling inside its time slice
   */
  static void cfq_idle_slice_timer(unsigned long data)
  {
  	struct cfq_data *cfqd = (struct cfq_data *) data;
  	struct cfq_queue *cfqq;
  	unsigned long flags;
3c6bd2f87   Jens Axboe   cfq-iosched: chec...
3607
  	int timed_out = 1;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3608

7b679138b   Jens Axboe   cfq-iosched: add ...
3609
  	cfq_log(cfqd, "idle timer fired");
22e2c507c   Jens Axboe   [PATCH] Update cf...
3610
  	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
fe094d98e   Jens Axboe   cfq-iosched: make...
3611
3612
  	cfqq = cfqd->active_queue;
  	if (cfqq) {
3c6bd2f87   Jens Axboe   cfq-iosched: chec...
3613
  		timed_out = 0;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3614
  		/*
b029195dd   Jens Axboe   cfq-iosched: don'...
3615
3616
3617
3618
3619
3620
  		 * We saw a request before the queue expired, let it through
  		 */
  		if (cfq_cfqq_must_dispatch(cfqq))
  			goto out_kick;
  
  		/*
22e2c507c   Jens Axboe   [PATCH] Update cf...
3621
3622
  		 * expired
  		 */
44f7c1606   Jens Axboe   cfq-iosched: defe...
3623
  		if (cfq_slice_used(cfqq))
22e2c507c   Jens Axboe   [PATCH] Update cf...
3624
3625
3626
3627
3628
3629
  			goto expire;
  
  		/*
  		 * only expire and reinvoke request handler, if there are
  		 * other queues with pending requests
  		 */
caaa5f9f0   Jens Axboe   [PATCH] cfq-iosch...
3630
  		if (!cfqd->busy_queues)
22e2c507c   Jens Axboe   [PATCH] Update cf...
3631
  			goto out_cont;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3632
3633
3634
3635
  
  		/*
  		 * not expired and it has a request pending, let it dispatch
  		 */
75e50984f   Jens Axboe   cfq-iosched: kill...
3636
  		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507c   Jens Axboe   [PATCH] Update cf...
3637
  			goto out_kick;
76280aff1   Corrado Zoccolo   cfq-iosched: idli...
3638
3639
3640
3641
3642
  
  		/*
  		 * Queue depth flag is reset only when the idle didn't succeed
  		 */
  		cfq_clear_cfqq_deep(cfqq);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3643
3644
  	}
  expire:
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
3645
  	cfq_slice_expired(cfqd, timed_out);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3646
  out_kick:
23e018a1b   Jens Axboe   block: get rid of...
3647
  	cfq_schedule_dispatch(cfqd);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3648
3649
3650
  out_cont:
  	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  }
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
3651
3652
3653
  static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
  {
  	del_timer_sync(&cfqd->idle_slice_timer);
23e018a1b   Jens Axboe   block: get rid of...
3654
  	cancel_work_sync(&cfqd->unplug_work);
3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
3655
  }
22e2c507c   Jens Axboe   [PATCH] Update cf...
3656

c2dea2d1f   Vasily Tarasov   cfq: async queue ...
3657
3658
3659
3660
3661
3662
3663
3664
3665
  static void cfq_put_async_queues(struct cfq_data *cfqd)
  {
  	int i;
  
  	for (i = 0; i < IOPRIO_BE_NR; i++) {
  		if (cfqd->async_cfqq[0][i])
  			cfq_put_queue(cfqd->async_cfqq[0][i]);
  		if (cfqd->async_cfqq[1][i])
  			cfq_put_queue(cfqd->async_cfqq[1][i]);
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
3666
  	}
2389d1ef1   Oleg Nesterov   cfq: fix IOPRIO_C...
3667
3668
3669
  
  	if (cfqd->async_idle_cfqq)
  		cfq_put_queue(cfqd->async_idle_cfqq);
c2dea2d1f   Vasily Tarasov   cfq: async queue ...
3670
  }
b374d18a4   Jens Axboe   block: get rid of...
3671
  static void cfq_exit_queue(struct elevator_queue *e)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3672
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
3673
  	struct cfq_data *cfqd = e->elevator_data;
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3674
  	struct request_queue *q = cfqd->queue;
56edf7d75   Vivek Goyal   cfq-iosched: Fix ...
3675
  	bool wait = false;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3676

3b18152c3   Jens Axboe   [PATCH] CFQ io sc...
3677
  	cfq_shutdown_timer_wq(cfqd);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3678

d9ff41879   Al Viro   [PATCH] make cfq_...
3679
  	spin_lock_irq(q->queue_lock);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3680

d9ff41879   Al Viro   [PATCH] make cfq_...
3681
  	if (cfqd->active_queue)
e5ff082e8   Vivek Goyal   blkio: Fix anothe...
3682
  		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3683
3684
  
  	while (!list_empty(&cfqd->cic_list)) {
d9ff41879   Al Viro   [PATCH] make cfq_...
3685
3686
3687
  		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
  							struct cfq_io_context,
  							queue_list);
89850f7ee   Jens Axboe   [PATCH] cfq-iosch...
3688
3689
  
  		__cfq_exit_single_io_context(cfqd, cic);
d9ff41879   Al Viro   [PATCH] make cfq_...
3690
  	}
e2d74ac06   Jens Axboe   [PATCH] [BLOCK] c...
3691

c2dea2d1f   Vasily Tarasov   cfq: async queue ...
3692
  	cfq_put_async_queues(cfqd);
b1c357696   Vivek Goyal   blkio: Take care ...
3693
  	cfq_release_cfq_groups(cfqd);
56edf7d75   Vivek Goyal   cfq-iosched: Fix ...
3694
3695
3696
3697
3698
3699
3700
  
  	/*
  	 * If there are groups which we could not unlink from blkcg list,
  	 * wait for an RCU grace period for them to be freed.
  	 */
  	if (cfqd->nr_blkcg_linked_grps)
  		wait = true;
15c31be4d   Jens Axboe   cfq-iosched: fix ...
3701

d9ff41879   Al Viro   [PATCH] make cfq_...
3702
  	spin_unlock_irq(q->queue_lock);
a90d742e4   Al Viro   [PATCH] don't bot...
3703
3704
  
  	cfq_shutdown_timer_wq(cfqd);
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
3705
3706
3707
  	spin_lock(&cic_index_lock);
  	ida_remove(&cic_index_ida, cfqd->cic_index);
  	spin_unlock(&cic_index_lock);
56edf7d75   Vivek Goyal   cfq-iosched: Fix ...
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
  	/*
  	 * Wait for cfqg->blkg->key accessors to exit their grace periods.
  	 * Do this wait only if there are other unlinked groups out
  	 * there. This can happen if cgroup deletion path claimed the
  	 * responsibility of cleaning up a group before queue cleanup code
  	 * get to the group.
  	 *
  	 * Do not call synchronize_rcu() unconditionally as there are drivers
  	 * which create/delete request queues hundreds of times during scan/boot
  	 * and synchronize_rcu() can take significant time and slow down boot.
  	 */
  	if (wait)
  		synchronize_rcu();
2abae55f5   Vivek Goyal   cfq-iosched: Fix ...
3721
3722
3723
3724
3725
  
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	/* Free up per cpu stats for root group */
  	free_percpu(cfqd->root_group.blkg.stats_cpu);
  #endif
56edf7d75   Vivek Goyal   cfq-iosched: Fix ...
3726
  	kfree(cfqd);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3727
  }
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
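  /*
   * Allocate a unique index from cic_index_ida; -EAGAIN from ida_get_new()
   * means the preallocated node was consumed, so preload and retry.
   */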
  static int cfq_alloc_cic_index(void)
  {
  	int index, error;
  
  	do {
  		if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
  			return -ENOMEM;
  
  		spin_lock(&cic_index_lock);
  		error = ida_get_new(&cic_index_ida, &index);
  		spin_unlock(&cic_index_lock);
  		if (error && error != -EAGAIN)
  			return error;
  	} while (error);
  
  	return index;
  }
165125e1e   Jens Axboe   [BLOCK] Get rid o...
3745
  static void *cfq_init_queue(struct request_queue *q)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3746
3747
  {
  	struct cfq_data *cfqd;
718eee057   Corrado Zoccolo   cfq-iosched: fair...
3748
  	int i, j;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
3749
  	struct cfq_group *cfqg;
615f0259e   Vivek Goyal   blkio: Implement ...
3750
  	struct cfq_rb_root *st;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3751

80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
3752
3753
3754
  	i = cfq_alloc_cic_index();
  	if (i < 0)
  		return NULL;
94f6030ca   Christoph Lameter   Slab allocators: ...
3755
  	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
1547010e6   Namhyung Kim   cfq-iosched: free...
3756
3757
3758
3759
  	if (!cfqd) {
  		spin_lock(&cic_index_lock);
  		ida_remove(&cic_index_ida, i);
  		spin_unlock(&cic_index_lock);
bc1c11697   Jens Axboe   [PATCH] elevator ...
3760
  		return NULL;
1547010e6   Namhyung Kim   cfq-iosched: free...
3761
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3762

30d7b9448   Shaohua Li   block cfq: don't ...
3763
3764
3765
3766
  	/*
  	 * Don't need take queue_lock in the routine, since we are
  	 * initializing the ioscheduler, and nobody is using cfqd
  	 */
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
3767
  	cfqd->cic_index = i;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
3768
3769
  	/* Init root service tree */
  	cfqd->grp_service_tree = CFQ_RB_ROOT;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
3770
3771
  	/* Init root group */
  	cfqg = &cfqd->root_group;
615f0259e   Vivek Goyal   blkio: Implement ...
3772
3773
  	for_each_cfqg_st(cfqg, i, j, st)
  		*st = CFQ_RB_ROOT;
1fa8f6d68   Vivek Goyal   blkio: Introduce ...
3774
  	RB_CLEAR_NODE(&cfqg->rb_node);
26a2ac009   Jens Axboe   cfq-iosched: clea...
3775

25bc6b077   Vivek Goyal   blkio: Introduce ...
3776
3777
  	/* Give preference to root group over other groups */
  	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
3778
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
b1c357696   Vivek Goyal   blkio: Take care ...
3779
  	/*
56edf7d75   Vivek Goyal   cfq-iosched: Fix ...
3780
3781
3782
3783
3784
  	 * Set root group reference to 2. One reference will be dropped when
  	 * all groups on cfqd->cfqg_list are being deleted during queue exit.
  	 * Other reference will remain there as we don't want to delete this
  	 * group as it is statically allocated and gets destroyed when
  	 * cfq_data goes away.
b1c357696   Vivek Goyal   blkio: Take care ...
3785
  	 */
56edf7d75   Vivek Goyal   cfq-iosched: Fix ...
3786
  	cfqg->ref = 2;
5624a4e44   Vivek Goyal   blk-throttle: Mak...
3787
3788
3789
3790
3791
3792
  
  	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
  		kfree(cfqg);
  		kfree(cfqd);
  		return NULL;
  	}
dcf097b24   Vivek Goyal   blk-cgroup: Fix R...
3793
  	rcu_read_lock();
5624a4e44   Vivek Goyal   blk-throttle: Mak...
3794

e98ef89b3   Vivek Goyal   cfq-iosched: Fixe...
3795
3796
  	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
  					(void *)cfqd, 0);
dcf097b24   Vivek Goyal   blk-cgroup: Fix R...
3797
  	rcu_read_unlock();
56edf7d75   Vivek Goyal   cfq-iosched: Fix ...
3798
3799
3800
3801
  	cfqd->nr_blkcg_linked_grps++;
  
  	/* Add group on cfqd->cfqg_list */
  	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
25fb5169d   Vivek Goyal   blkio: Dynamic cf...
3802
  #endif
26a2ac009   Jens Axboe   cfq-iosched: clea...
3803
3804
3805
3806
3807
3808
3809
  	/*
  	 * Not strictly needed (since RB_ROOT just clears the node and we
  	 * zeroed cfqd on alloc), but better be safe in case someone decides
  	 * to add magic to the rb code
  	 */
  	for (i = 0; i < CFQ_PRIO_LISTS; i++)
  		cfqd->prio_trees[i] = RB_ROOT;
6118b70b3   Jens Axboe   cfq-iosched: get ...
3810
3811
3812
3813
3814
3815
  	/*
  	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
  	 * Grab a permanent reference to it, so that the normal code flow
  	 * will not attempt to free it.
  	 */
  	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
30d7b9448   Shaohua Li   block cfq: don't ...
3816
  	cfqd->oom_cfqq.ref++;
cdb16e8f7   Vivek Goyal   blkio: Introduce ...
3817
  	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
6118b70b3   Jens Axboe   cfq-iosched: get ...
3818

d9ff41879   Al Viro   [PATCH] make cfq_...
3819
  	INIT_LIST_HEAD(&cfqd->cic_list);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3820

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3821
  	cfqd->queue = q;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3822

22e2c507c   Jens Axboe   [PATCH] Update cf...
3823
3824
3825
  	init_timer(&cfqd->idle_slice_timer);
  	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
  	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
23e018a1b   Jens Axboe   block: get rid of...
3826
  	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3827

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3828
  	cfqd->cfq_quantum = cfq_quantum;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3829
3830
  	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
  	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3831
3832
  	cfqd->cfq_back_max = cfq_back_max;
  	cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507c   Jens Axboe   [PATCH] Update cf...
3833
3834
3835
3836
  	cfqd->cfq_slice[0] = cfq_slice_async;
  	cfqd->cfq_slice[1] = cfq_slice_sync;
  	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
  	cfqd->cfq_slice_idle = cfq_slice_idle;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3837
  	cfqd->cfq_group_idle = cfq_group_idle;
963b72fc6   Jens Axboe   cfq-iosched: rena...
3838
  	cfqd->cfq_latency = 1;
e459dd08f   Corrado Zoccolo   cfq-iosched: fix ...
3839
  	cfqd->hw_tag = -1;
edc71131c   Corrado Zoccolo   cfq-iosched: comm...
3840
3841
3842
3843
  	/*
  	 * we optimistically start assuming sync ops weren't delayed in the last
  	 * second, in order to have larger depth for async operations.
  	 */
573412b29   Corrado Zoccolo   cfq-iosched: redu...
3844
  	cfqd->last_delayed_sync = jiffies - HZ;
bc1c11697   Jens Axboe   [PATCH] elevator ...
3845
  	return cfqd;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3846
3847
3848
3849
  }
  
  static void cfq_slab_kill(void)
  {
d6de8be71   Jens Axboe   cfq-iosched: fix ...
3850
3851
3852
3853
  	/*
  	 * Caller already ensured that pending RCU callbacks are completed,
  	 * so we should have no busy allocations at this point.
  	 */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3854
3855
3856
3857
3858
3859
3860
3861
  	if (cfq_pool)
  		kmem_cache_destroy(cfq_pool);
  	if (cfq_ioc_pool)
  		kmem_cache_destroy(cfq_ioc_pool);
  }
  
  static int __init cfq_slab_setup(void)
  {
0a31bd5f2   Christoph Lameter   KMEM_CACHE(): sim...
3862
  	cfq_pool = KMEM_CACHE(cfq_queue, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3863
3864
  	if (!cfq_pool)
  		goto fail;
34e6bbf23   Fabio Checconi   cfq-iosched: fix ...
3865
  	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3866
3867
3868
3869
3870
3871
3872
3873
  	if (!cfq_ioc_pool)
  		goto fail;
  
  	return 0;
  fail:
  	cfq_slab_kill();
  	return -ENOMEM;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3874
3875
3876
  /*
   * sysfs parts below -->
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
  static ssize_t
  cfq_var_show(unsigned int var, char *page)
  {
  	return sprintf(page, "%d\n", var);
  }
  
  static ssize_t
  cfq_var_store(unsigned int *var, const char *page, size_t count)
  {
  	char *p = (char *) page;
  
  	*var = simple_strtoul(p, &p, 10);
  	return count;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3892
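  /*
   * sysfs show helpers: __CONV selects conversion of jiffies-based tunables
   * to milliseconds for display.
   */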
  #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
b374d18a4   Jens Axboe   block: get rid of...
3893
  static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3894
  {									\
3d1ab40f4   Al Viro   [PATCH] elevator_...
3895
  	struct cfq_data *cfqd = e->elevator_data;			\
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3896
3897
3898
3899
3900
3901
  	unsigned int __data = __VAR;					\
  	if (__CONV)							\
  		__data = jiffies_to_msecs(__data);			\
  	return cfq_var_show(__data, (page));				\
  }
  SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3902
3903
  SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
  SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e4   Al Viro   [PATCH] fix rmmod...
3904
3905
  SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
  SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3906
  SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3907
  SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3908
3909
3910
  SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
  SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
  SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc6   Jens Axboe   cfq-iosched: rena...
3911
  SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3912
3913
3914
  #undef SHOW_FUNCTION
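  /*
   * sysfs store helpers: values are clamped to [MIN, MAX] and, when __CONV is
   * set, converted from milliseconds back to jiffies.
   */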
  
  #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
b374d18a4   Jens Axboe   block: get rid of...
3915
  static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3916
  {									\
3d1ab40f4   Al Viro   [PATCH] elevator_...
3917
  	struct cfq_data *cfqd = e->elevator_data;			\
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
  	unsigned int __data;						\
  	int ret = cfq_var_store(&__data, (page), count);		\
  	if (__data < (MIN))						\
  		__data = (MIN);						\
  	else if (__data > (MAX))					\
  		__data = (MAX);						\
  	if (__CONV)							\
  		*(__PTR) = msecs_to_jiffies(__data);			\
  	else								\
  		*(__PTR) = __data;					\
  	return ret;							\
  }
  STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98e   Jens Axboe   cfq-iosched: make...
3931
3932
3933
3934
  STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
  		UINT_MAX, 1);
  STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
  		UINT_MAX, 1);
e572ec7e4   Al Viro   [PATCH] fix rmmod...
3935
  STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98e   Jens Axboe   cfq-iosched: make...
3936
3937
  STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
  		UINT_MAX, 0);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3938
  STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3939
  STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
22e2c507c   Jens Axboe   [PATCH] Update cf...
3940
3941
  STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
  STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98e   Jens Axboe   cfq-iosched: make...
3942
3943
  STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
  		UINT_MAX, 0);
963b72fc6   Jens Axboe   cfq-iosched: rena...
3944
  STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3945
  #undef STORE_FUNCTION
e572ec7e4   Al Viro   [PATCH] fix rmmod...
3946
3947
3948
3949
3950
  #define CFQ_ATTR(name) \
  	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
  
  static struct elv_fs_entry cfq_attrs[] = {
  	CFQ_ATTR(quantum),
e572ec7e4   Al Viro   [PATCH] fix rmmod...
3951
3952
3953
3954
3955
3956
3957
3958
  	CFQ_ATTR(fifo_expire_sync),
  	CFQ_ATTR(fifo_expire_async),
  	CFQ_ATTR(back_seek_max),
  	CFQ_ATTR(back_seek_penalty),
  	CFQ_ATTR(slice_sync),
  	CFQ_ATTR(slice_async),
  	CFQ_ATTR(slice_async_rq),
  	CFQ_ATTR(slice_idle),
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
3959
  	CFQ_ATTR(group_idle),
963b72fc6   Jens Axboe   cfq-iosched: rena...
3960
  	CFQ_ATTR(low_latency),
e572ec7e4   Al Viro   [PATCH] fix rmmod...
3961
  	__ATTR_NULL
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3962
  };
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3963
3964
3965
3966
3967
  static struct elevator_type iosched_cfq = {
  	.ops = {
  		.elevator_merge_fn = 		cfq_merge,
  		.elevator_merged_fn =		cfq_merged_request,
  		.elevator_merge_req_fn =	cfq_merged_requests,
da7752650   Jens Axboe   [PATCH] cfq-iosch...
3968
  		.elevator_allow_merge_fn =	cfq_allow_merge,
812d40264   Divyesh Shah   blkio: Add io_mer...
3969
  		.elevator_bio_merged_fn =	cfq_bio_merged,
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3970
  		.elevator_dispatch_fn =		cfq_dispatch_requests,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3971
  		.elevator_add_req_fn =		cfq_insert_request,
b4878f245   Jens Axboe   [PATCH] 02/05: up...
3972
  		.elevator_activate_req_fn =	cfq_activate_request,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3973
  		.elevator_deactivate_req_fn =	cfq_deactivate_request,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3974
  		.elevator_completed_req_fn =	cfq_completed_request,
21183b07e   Jens Axboe   [PATCH] cfq-iosch...
3975
3976
  		.elevator_former_req_fn =	elv_rb_former_request,
  		.elevator_latter_req_fn =	elv_rb_latter_request,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3977
3978
3979
3980
3981
  		.elevator_set_req_fn =		cfq_set_request,
  		.elevator_put_req_fn =		cfq_put_request,
  		.elevator_may_queue_fn =	cfq_may_queue,
  		.elevator_init_fn =		cfq_init_queue,
  		.elevator_exit_fn =		cfq_exit_queue,
fc46379da   Jens Axboe   [PATCH] cfq-iosch...
3982
  		.trim =				cfq_free_io_context,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3983
  	},
3d1ab40f4   Al Viro   [PATCH] elevator_...
3984
  	.elevator_attrs =	cfq_attrs,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3985
3986
3987
  	.elevator_name =	"cfq",
  	.elevator_owner =	THIS_MODULE,
  };
3e2520668   Vivek Goyal   blkio: Implement ...
3988
3989
3990
3991
3992
3993
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  static struct blkio_policy_type blkio_policy_cfq = {
  	.ops = {
  		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
  		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
  	},
062a644d6   Vivek Goyal   blk-cgroup: Prepa...
3994
  	.plid = BLKIO_POLICY_PROP,
3e2520668   Vivek Goyal   blkio: Implement ...
3995
3996
3997
3998
  };
  #else
  static struct blkio_policy_type blkio_policy_cfq;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
3999
4000
  static int __init cfq_init(void)
  {
22e2c507c   Jens Axboe   [PATCH] Update cf...
4001
4002
4003
4004
4005
4006
4007
  	/*
  	 * could be 0 on HZ < 1000 setups
  	 */
  	if (!cfq_slice_async)
  		cfq_slice_async = 1;
  	if (!cfq_slice_idle)
  		cfq_slice_idle = 1;
80bdf0c78   Vivek Goyal   cfq-iosched: Impl...
4008
4009
4010
4011
4012
4013
  #ifdef CONFIG_CFQ_GROUP_IOSCHED
  	if (!cfq_group_idle)
  		cfq_group_idle = 1;
  #else
  	cfq_group_idle = 0;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
4014
4015
  	if (cfq_slab_setup())
  		return -ENOMEM;
2fdd82bd8   Adrian Bunk   block: let elv_re...
4016
  	elv_register(&iosched_cfq);
3e2520668   Vivek Goyal   blkio: Implement ...
4017
  	blkio_policy_register(&blkio_policy_cfq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
4018

2fdd82bd8   Adrian Bunk   block: let elv_re...
4019
  	return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
4020
4021
4022
4023
  }
  
  static void __exit cfq_exit(void)
  {
6e9a4738c   Peter Zijlstra   [PATCH] completio...
4024
  	DECLARE_COMPLETION_ONSTACK(all_gone);
3e2520668   Vivek Goyal   blkio: Implement ...
4025
  	blkio_policy_unregister(&blkio_policy_cfq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
4026
  	elv_unregister(&iosched_cfq);
334e94de9   Al Viro   [PATCH] deal with...
4027
  	ioc_gone = &all_gone;
fba822722   OGAWA Hirofumi   [PATCH 1/2] iosch...
4028
4029
  	/* ioc_gone's update must be visible before reading ioc_count */
  	smp_wmb();
d6de8be71   Jens Axboe   cfq-iosched: fix ...
4030
4031
4032
4033
4034
  
  	/*
  	 * this also protects us from entering cfq_slab_kill() with
  	 * pending RCU callbacks
  	 */
245b2e70e   Tejun Heo   percpu: clean up ...
4035
  	if (elv_ioc_count_read(cfq_ioc_count))
9a11b4ed0   Jens Axboe   cfq-iosched: prop...
4036
  		wait_for_completion(&all_gone);
80b15c738   Konstantin Khlebnikov   cfq-iosched: comp...
4037
  	ida_destroy(&cic_index_ida);
83521d3eb   Christoph Hellwig   [PATCH] cfq-iosch...
4038
  	cfq_slab_kill();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
4039
4040
4041
4042
4043
4044
4045
4046
  }
  
  module_init(cfq_init);
  module_exit(cfq_exit);
  
  MODULE_AUTHOR("Jens Axboe");
  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");