block/mq-deadline.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
   *  for the blk-mq scheduling framework
   *
   *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
   */
  #include <linux/kernel.h>
  #include <linux/fs.h>
  #include <linux/blkdev.h>
  #include <linux/blk-mq.h>
  #include <linux/elevator.h>
  #include <linux/bio.h>
  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/init.h>
  #include <linux/compiler.h>
  #include <linux/rbtree.h>
  #include <linux/sbitmap.h>
  #include <trace/events/block.h>
  #include "blk.h"
  #include "blk-mq.h"
  #include "blk-mq-debugfs.h"
  #include "blk-mq-tag.h"
  #include "blk-mq-sched.h"
  
  /*
   * See Documentation/block/deadline-iosched.rst
   */
  static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
  static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
  static const int writes_starved = 2;    /* max times reads can starve a write */
  static const int fifo_batch = 16;       /* # of sequential requests treated as one
  				     by the above parameters. For throughput. */
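
/*
 * Data directions handled by the scheduler. DD_READ and DD_WRITE alias the
 * generic READ and WRITE values so that rq_data_dir() can be used directly
 * as an index into the per-direction lists below.
 */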
  enum dd_data_dir {
  	DD_READ		= READ,
  	DD_WRITE	= WRITE,
  };
  
  enum { DD_DIR_COUNT = 2 };
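
/*
 * I/O priority classes are collapsed onto three scheduling priorities:
 * real-time, best-effort and idle (see ioprio_class_to_prio[] below).
 */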
  enum dd_prio {
  	DD_RT_PRIO	= 0,
  	DD_BE_PRIO	= 1,
  	DD_IDLE_PRIO	= 2,
  	DD_PRIO_MAX	= 2,
  };
  
  enum { DD_PRIO_COUNT = 3 };
  /* I/O statistics per I/O priority. */
  struct io_stats_per_prio {
  	local_t inserted;
  	local_t merged;
  	local_t dispatched;
  	local_t completed;
  };
  /* I/O statistics for all I/O priorities (enum dd_prio). */
  struct io_stats {
  	struct io_stats_per_prio stats[DD_PRIO_COUNT];
  };
  /*
   * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
   * present on both sort_list[] and fifo_list[].
   */
  struct dd_per_prio {
  	struct list_head dispatch;
  	struct rb_root sort_list[DD_DIR_COUNT];
  	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in sector-sorted order. Read, write or both are NULL. */
  	struct request *next_rq[DD_DIR_COUNT];
  };
  struct deadline_data {
  	/*
  	 * run time data
  	 */
  	struct dd_per_prio per_prio[DD_PRIO_COUNT];

  	/* Data direction of latest dispatched request. */
  	enum dd_data_dir last_dir;
  	unsigned int batching;		/* number of sequential requests made */
  	unsigned int starved;		/* times reads have starved writes */
  	struct io_stats __percpu *stats;
  	/*
  	 * settings that change how the i/o scheduler behaves
  	 */
  	int fifo_expire[DD_DIR_COUNT];
  	int fifo_batch;
  	int writes_starved;
  	int front_merges;
  	u32 async_depth;
  
  	spinlock_t lock;
  	spinlock_t zone_lock;
  };
  /* Count one event of type 'event_type' and with I/O priority 'prio' */
  #define dd_count(dd, event_type, prio) do {				\
  	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
  									\
  	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
  	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
  	local_inc(&io_stats->stats[(prio)].event_type);			\
  	put_cpu_ptr(io_stats);						\
  } while (0)
  
  /*
   * Returns the total number of dd_count(dd, event_type, prio) calls across all
   * CPUs. No locking or barriers since it is fine if the returned sum is slightly
   * outdated.
   */
  #define dd_sum(dd, event_type, prio) ({					\
  	unsigned int cpu;						\
  	u32 sum = 0;							\
  									\
  	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
  	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
  	for_each_present_cpu(cpu)					\
  		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
  				  stats[(prio)].event_type);		\
  	sum;								\
  })
  /* Maps an I/O priority class to a deadline scheduler priority. */
  static const enum dd_prio ioprio_class_to_prio[] = {
  	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
  	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
  	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
  	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
  };
  
  static inline struct rb_root *
  deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
  {
  	return &per_prio->sort_list[rq_data_dir(rq)];
  }
  
  /*
   * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
   * request.
   */
  static u8 dd_rq_ioclass(struct request *rq)
  {
  	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
  }
  
  /*
   * get the request after `rq' in sector-sorted order
   */
  static inline struct request *
  deadline_latter_request(struct request *rq)
  {
  	struct rb_node *node = rb_next(&rq->rb_node);
  
  	if (node)
  		return rb_entry_rq(node);
  
  	return NULL;
  }
  
  static void
  deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
  {
  	struct rb_root *root = deadline_rb_root(per_prio, rq);
  
  	elv_rb_add(root, rq);
  }
  
  static inline void
  deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
  {
  	const enum dd_data_dir data_dir = rq_data_dir(rq);

  	if (per_prio->next_rq[data_dir] == rq)
  		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

  	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
  }
  
  /*
   * remove rq from rbtree and fifo.
   */
  static void deadline_remove_request(struct request_queue *q,
  				    struct dd_per_prio *per_prio,
  				    struct request *rq)
  {
  	list_del_init(&rq->queuelist);
  
  	/*
  	 * We might not be on the rbtree, if we are doing an insert merge
  	 */
  	if (!RB_EMPTY_NODE(&rq->rb_node))
  		deadline_del_rq_rb(per_prio, rq);
  
  	elv_rqhash_del(q, rq);
  	if (q->last_merge == rq)
  		q->last_merge = NULL;
  }
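
/*
 * Callback invoked by the block layer after a bio has been merged into @req
 * (elevator .request_merged). A front merge changes the start sector of @req,
 * so the request must be repositioned in the sector-sorted rbtree.
 */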
  
  static void dd_request_merged(struct request_queue *q, struct request *req,
  			      enum elv_merge type)
  {
  	struct deadline_data *dd = q->elevator->elevator_data;
  	const u8 ioprio_class = dd_rq_ioclass(req);
  	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];
  
  	/*
  	 * if the merge was a front merge, we need to reposition request
  	 */
  	if (type == ELEVATOR_FRONT_MERGE) {
  		elv_rb_del(deadline_rb_root(per_prio, req), req);
  		deadline_add_rq_rb(per_prio, req);
  	}
  }
  /*
   * Callback function that is invoked after @next has been merged into @req.
   */
  static void dd_merged_requests(struct request_queue *q, struct request *req,
  			       struct request *next)
  {
  	struct deadline_data *dd = q->elevator->elevator_data;
  	const u8 ioprio_class = dd_rq_ioclass(next);
  	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  	dd_count(dd, merged, prio);
  	/*
  	 * if next expires before rq, assign its expire time to rq
  	 * and move into next position (next will be deleted) in fifo
  	 */
  	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
  		if (time_before((unsigned long)next->fifo_time,
  				(unsigned long)req->fifo_time)) {
  			list_move(&req->queuelist, &next->queuelist);
  			req->fifo_time = next->fifo_time;
  		}
  	}
  
  	/*
  	 * kill knowledge of next, this one is a goner
  	 */
  	deadline_remove_request(q, &dd->per_prio[prio], next);
  }
  
  /*
 * remove rq from the rbtree and fifo lists in preparation for dispatching it
   */
  static void
  deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
  		      struct request *rq)
  {
  	const enum dd_data_dir data_dir = rq_data_dir(rq);

  	per_prio->next_rq[data_dir] = deadline_latter_request(rq);
  
  	/*
  	 * take it off the sort and fifo list
  	 */
  	deadline_remove_request(rq->q, per_prio, rq);
  }
  
  /*
   * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
   */
  static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
  				      enum dd_data_dir data_dir)
  {
  	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
  
  	/*
  	 * rq is expired!
  	 */
  	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
  		return 1;
  
  	return 0;
  }
  
  /*
   * For the specified data direction, return the next request to
   * dispatch using arrival ordered lists.
   */
  static struct request *
  deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
  		      enum dd_data_dir data_dir)
  {
  	struct request *rq;
  	unsigned long flags;
  	if (list_empty(&per_prio->fifo_list[data_dir]))
  		return NULL;
  	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
  	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
  		return rq;
  
  	/*
  	 * Look for a write request that can be dispatched, that is one with
  	 * an unlocked target zone.
  	 */
  	spin_lock_irqsave(&dd->zone_lock, flags);
  	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
  		if (blk_req_can_dispatch_to_zone(rq))
  			goto out;
  	}
  	rq = NULL;
  out:
  	spin_unlock_irqrestore(&dd->zone_lock, flags);
  
  	return rq;
  }
  
  /*
   * For the specified data direction, return the next request to
   * dispatch using sector position sorted lists.
   */
  static struct request *
  deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
  		      enum dd_data_dir data_dir)
  {
  	struct request *rq;
  	unsigned long flags;
  	rq = per_prio->next_rq[data_dir];
  	if (!rq)
  		return NULL;
  	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
  		return rq;
  
  	/*
  	 * Look for a write request that can be dispatched, that is one with
  	 * an unlocked target zone.
  	 */
  	spin_lock_irqsave(&dd->zone_lock, flags);
  	while (rq) {
  		if (blk_req_can_dispatch_to_zone(rq))
  			break;
  		rq = deadline_latter_request(rq);
  	}
  	spin_unlock_irqrestore(&dd->zone_lock, flags);
  
  	return rq;
  }
  
  /*
   * deadline_dispatch_requests selects the best request according to
   * read/write expire, fifo_batch, etc
   */
  static struct request *__dd_dispatch_request(struct deadline_data *dd,
  					     struct dd_per_prio *per_prio)
  {
  	struct request *rq, *next_rq;
  	enum dd_data_dir data_dir;
  	enum dd_prio prio;
  	u8 ioprio_class;

  	lockdep_assert_held(&dd->lock);
  	if (!list_empty(&per_prio->dispatch)) {
  		rq = list_first_entry(&per_prio->dispatch, struct request,
  				      queuelist);
  		list_del_init(&rq->queuelist);
  		goto done;
  	}
  	/*
  	 * batches are currently reads XOR writes
  	 */
  	rq = deadline_next_request(dd, per_prio, dd->last_dir);
  	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
  		goto dispatch_request;
  
  	/*
  	 * at this point we are not running a batch. select the appropriate
  	 * data direction (read / write)
  	 */
  	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
  		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

  		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
  		    (dd->starved++ >= dd->writes_starved))
  			goto dispatch_writes;
  		data_dir = DD_READ;
  
  		goto dispatch_find_request;
  	}
  
  	/*
  	 * there are either no reads or writes have been starved
  	 */
  	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
  dispatch_writes:
  		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
  
  		dd->starved = 0;
  		data_dir = DD_WRITE;
  
  		goto dispatch_find_request;
  	}
  
  	return NULL;
  
  dispatch_find_request:
  	/*
  	 * we are not running a batch, find best request for selected data_dir
  	 */
  	next_rq = deadline_next_request(dd, per_prio, data_dir);
  	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
  		/*
  		 * A deadline has expired, the last request was in the other
  		 * direction, or we have run out of higher-sectored requests.
  		 * Start again from the request with the earliest expiry time.
  		 */
  		rq = deadline_fifo_request(dd, per_prio, data_dir);
  	} else {
  		/*
  		 * The last req was the same dir and we have a next request in
  		 * sort order. No expired requests so continue on from here.
  		 */
  		rq = next_rq;
  	}
  	/*
  	 * For a zoned block device, if we only have writes queued and none of
  	 * them can be dispatched, rq will be NULL.
  	 */
  	if (!rq)
  		return NULL;
  	dd->last_dir = data_dir;
  	dd->batching = 0;
  
  dispatch_request:
  	/*
  	 * rq is the selected appropriate request.
  	 */
  	dd->batching++;
  	deadline_move_request(dd, per_prio, rq);
  done:
  	ioprio_class = dd_rq_ioclass(rq);
  	prio = ioprio_class_to_prio[ioprio_class];
  	dd_count(dd, dispatched, prio);
  	/*
  	 * If the request needs its target zone locked, do it.
  	 */
  	blk_req_zone_write_lock(rq);
  	rq->rq_flags |= RQF_STARTED;
  	return rq;
  }
  /*
   * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
   *
   * One confusing aspect here is that we get called for a specific
   * hardware queue, but we may return a request that is for a
   * different hardware queue. This is because mq-deadline has shared
   * state for all hardware queues, in terms of sorting, FIFOs, etc.
   */
  static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
  {
  	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
  	struct request *rq;
  	enum dd_prio prio;
  
  	spin_lock(&dd->lock);
  	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
  		rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
  		if (rq)
  			break;
  	}
  	spin_unlock(&dd->lock);
  
  	return rq;
  }
  /*
   * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
   * function is used by __blk_mq_get_tag().
   */
  static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
  {
  	struct deadline_data *dd = data->q->elevator->elevator_data;
  
  	/* Do not throttle synchronous reads. */
  	if (op_is_sync(op) && !op_is_write(op))
  		return;
  
  	/*
  	 * Throttle asynchronous requests and writes such that these requests
  	 * do not block the allocation of synchronous requests.
  	 */
  	data->shallow_depth = dd->async_depth;
  }
  
  /* Called by blk_mq_update_nr_requests(). */
  static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  	struct deadline_data *dd = q->elevator->elevator_data;
  	struct blk_mq_tags *tags = hctx->sched_tags;
  
  	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
  
  	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
  }
  
  /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
  static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
  {
  	dd_depth_updated(hctx);
  	return 0;
  }
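
/*
 * Counterpart of dd_init_sched(): warn if any FIFO list is still populated,
 * then free the per-CPU statistics and the deadline_data itself.
 */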
  static void dd_exit_sched(struct elevator_queue *e)
  {
  	struct deadline_data *dd = e->elevator_data;
  	enum dd_prio prio;

  	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
  		struct dd_per_prio *per_prio = &dd->per_prio[prio];
  
  		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
  		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
  	}

  	free_percpu(dd->stats);
  	kfree(dd);
  }
  
  /*
   * initialize elevator private data (deadline_data).
   */
  static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
  {
  	struct deadline_data *dd;
  	struct elevator_queue *eq;
  	enum dd_prio prio;
  	int ret = -ENOMEM;
  
  	eq = elevator_alloc(q, e);
  	if (!eq)
  		return ret;
  
  	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
  	if (!dd)
  		goto put_eq;
  	eq->elevator_data = dd;
  	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
  				     GFP_KERNEL | __GFP_ZERO);
  	if (!dd->stats)
  		goto free_dd;
  	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
  		struct dd_per_prio *per_prio = &dd->per_prio[prio];
  
  		INIT_LIST_HEAD(&per_prio->dispatch);
  		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
  		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
  		per_prio->sort_list[DD_READ] = RB_ROOT;
  		per_prio->sort_list[DD_WRITE] = RB_ROOT;
  	}
  	dd->fifo_expire[DD_READ] = read_expire;
  	dd->fifo_expire[DD_WRITE] = write_expire;
  	dd->writes_starved = writes_starved;
  	dd->front_merges = 1;
  	dd->last_dir = DD_WRITE;
  	dd->fifo_batch = fifo_batch;
  	spin_lock_init(&dd->lock);
  	spin_lock_init(&dd->zone_lock);
  
  	q->elevator = eq;
  	return 0;

  free_dd:
  	kfree(dd);
  put_eq:
  	kobject_put(&eq->kobj);
  	return ret;
  }
  /*
   * Try to merge @bio into an existing request. If @bio has been merged into
   * an existing request, store the pointer to that request into *@rq.
   */
  static int dd_request_merge(struct request_queue *q, struct request **rq,
  			    struct bio *bio)
  {
  	struct deadline_data *dd = q->elevator->elevator_data;
  	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
  	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];
  	sector_t sector = bio_end_sector(bio);
  	struct request *__rq;
  
  	if (!dd->front_merges)
  		return ELEVATOR_NO_MERGE;
  	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
  	if (__rq) {
  		BUG_ON(sector != blk_rq_pos(__rq));
  
  		if (elv_bio_merge_ok(__rq, bio)) {
  			*rq = __rq;
  			if (blk_discard_mergable(__rq))
  				return ELEVATOR_DISCARD_MERGE;
  			return ELEVATOR_FRONT_MERGE;
  		}
  	}
  
  	return ELEVATOR_NO_MERGE;
  }
  /*
   * Attempt to merge a bio into an existing request. This function is called
   * before @bio is associated with a request.
   */
  static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
  		unsigned int nr_segs)
  {
  	struct deadline_data *dd = q->elevator->elevator_data;
  	struct request *free = NULL;
  	bool ret;
  
  	spin_lock(&dd->lock);
  	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
  	spin_unlock(&dd->lock);
  	if (free)
  		blk_mq_free_request(free);
  	return ret;
  }
  
  /*
   * add rq to rbtree and fifo
   */
  static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  			      bool at_head)
  {
  	struct request_queue *q = hctx->queue;
  	struct deadline_data *dd = q->elevator->elevator_data;
  	const enum dd_data_dir data_dir = rq_data_dir(rq);
  	u16 ioprio = req_get_ioprio(rq);
  	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
  	struct dd_per_prio *per_prio;
  	enum dd_prio prio;
  	LIST_HEAD(free);

  	lockdep_assert_held(&dd->lock);
  	/*
  	 * This may be a requeue of a write request that has locked its
  	 * target zone. If it is the case, this releases the zone lock.
  	 */
  	blk_req_zone_write_unlock(rq);
  	prio = ioprio_class_to_prio[ioprio_class];
  	dd_count(dd, inserted, prio);
  	rq->elv.priv[0] = (void *)(uintptr_t)1;

  	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
  		blk_mq_free_requests(&free);
  		return;
  	}

  	trace_block_rq_insert(rq);

  	per_prio = &dd->per_prio[prio];
  	if (at_head) {
  		list_add(&rq->queuelist, &per_prio->dispatch);
  	} else {
  		deadline_add_rq_rb(per_prio, rq);
  
  		if (rq_mergeable(rq)) {
  			elv_rqhash_add(q, rq);
  			if (!q->last_merge)
  				q->last_merge = rq;
  		}
  
  		/*
  		 * set expire time and add to fifo list
  		 */
  		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
  		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
  	}
  }
  /*
   * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
   */
  static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
  			       struct list_head *list, bool at_head)
  {
  	struct request_queue *q = hctx->queue;
  	struct deadline_data *dd = q->elevator->elevator_data;
  
  	spin_lock(&dd->lock);
  	while (!list_empty(list)) {
  		struct request *rq;
  
  		rq = list_first_entry(list, struct request, queuelist);
  		list_del_init(&rq->queuelist);
  		dd_insert_request(hctx, rq, at_head);
  	}
  	spin_unlock(&dd->lock);
  }
  /* Callback from inside blk_mq_rq_ctx_init(). */
  static void dd_prepare_request(struct request *rq)
  {
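	/*
	 * Clear elv.priv[0]; dd_insert_request() sets it to a non-NULL value
	 * to record that the request went through the I/O scheduler. See
	 * dd_finish_request().
	 */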
  	rq->elv.priv[0] = NULL;
  }
  
  /*
   * Callback from inside blk_mq_free_request().
   *
   * For zoned block devices, write unlock the target zone of
   * completed write requests. Do this while holding the zone lock
   * spinlock so that the zone is never unlocked while deadline_fifo_request()
   * or deadline_next_request() are executing. This function is called for
   * all requests, whether or not these requests complete successfully.
   *
   * For a zoned block device, __dd_dispatch_request() may have stopped
   * dispatching requests if all the queued requests are write requests directed
   * at zones that are already locked due to on-going write requests. To ensure
   * write request dispatch progress in this case, mark the queue as needing a
   * restart to ensure that the queue is run again after completion of the
   * request and zones being unlocked.
   */
  static void dd_finish_request(struct request *rq)
  {
  	struct request_queue *q = rq->q;
  	struct deadline_data *dd = q->elevator->elevator_data;
  	const u8 ioprio_class = dd_rq_ioclass(rq);
  	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];

  	/*
  	 * The block layer core may call dd_finish_request() without having
  	 * called dd_insert_requests(). Hence only update statistics for
  	 * requests for which dd_insert_requests() has been called. See also
  	 * blk_mq_request_bypass_insert().
  	 */
  	if (rq->elv.priv[0])
  		dd_count(dd, completed, prio);

  	if (blk_queue_is_zoned(q)) {
  		unsigned long flags;
  
  		spin_lock_irqsave(&dd->zone_lock, flags);
  		blk_req_zone_write_unlock(rq);
  		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
  			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
  		spin_unlock_irqrestore(&dd->zone_lock, flags);
  	}
  }
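
/*
 * Returns true if any request is pending for the given priority level, i.e.
 * present on the dispatch list or on one of the two FIFO lists.
 */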
  static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
  {
  	return !list_empty_careful(&per_prio->dispatch) ||
  		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
  		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
  }
  static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
  {
  	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
  	enum dd_prio prio;
  
  	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
  		if (dd_has_work_for_prio(&dd->per_prio[prio]))
  			return true;

  	return false;
  }
  
  /*
   * sysfs parts below
   */
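/*
 * The macros below generate the show and store handlers for the mq-deadline
 * tunables, which are typically exposed under
 * /sys/block/<disk>/queue/iosched/ while this scheduler is active (e.g.
 * read_expire, write_expire, fifo_batch). The *_expire values are kept in
 * jiffies internally and converted to/from milliseconds for sysfs.
 */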
  #define SHOW_INT(__FUNC, __VAR)						\
  static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
  {									\
  	struct deadline_data *dd = e->elevator_data;			\
  									\
	return sysfs_emit(page, "%d\n", __VAR);				\
  }
  #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
  SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
  SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
  SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
  SHOW_INT(deadline_front_merges_show, dd->front_merges);
  SHOW_INT(deadline_async_depth_show, dd->async_depth);
  SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
  #undef SHOW_INT
  #undef SHOW_JIFFIES
  
  #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
  static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
  {									\
  	struct deadline_data *dd = e->elevator_data;			\
  	int __data, __ret;						\
  									\
  	__ret = kstrtoint(page, 0, &__data);				\
  	if (__ret < 0)							\
  		return __ret;						\
  	if (__data < (MIN))						\
  		__data = (MIN);						\
  	else if (__data > (MAX))					\
  		__data = (MAX);						\
  	*(__PTR) = __CONV(__data);					\
  	return count;							\
  }
  #define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
  	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
  #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
  	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
  STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
  STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
  STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
  STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
  STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
  STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
  #undef STORE_FUNCTION
  #undef STORE_INT
  #undef STORE_JIFFIES
  
  #define DD_ATTR(name) \
  	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
  
  static struct elv_fs_entry deadline_attrs[] = {
  	DD_ATTR(read_expire),
  	DD_ATTR(write_expire),
  	DD_ATTR(writes_starved),
  	DD_ATTR(front_merges),
  	DD_ATTR(async_depth),
  	DD_ATTR(fifo_batch),
  	__ATTR_NULL
  };
  #ifdef CONFIG_BLK_DEBUG_FS
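/*
 * Per-queue debugfs attributes: for each scheduling priority (0 = RT, 1 = BE,
 * 2 = IDLE) and data direction, expose the FIFO list contents and the next
 * request in sector order, plus the dispatch lists and a few counters.
 */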
  #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
  static void *deadline_##name##_fifo_start(struct seq_file *m,		\
  					  loff_t *pos)			\
  	__acquires(&dd->lock)						\
  {									\
  	struct request_queue *q = m->private;				\
  	struct deadline_data *dd = q->elevator->elevator_data;		\
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
  									\
  	spin_lock(&dd->lock);						\
  	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
  }									\
  									\
  static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
  					 loff_t *pos)			\
  {									\
  	struct request_queue *q = m->private;				\
  	struct deadline_data *dd = q->elevator->elevator_data;		\
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
  									\
  	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
  }									\
  									\
  static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
  	__releases(&dd->lock)						\
  {									\
  	struct request_queue *q = m->private;				\
  	struct deadline_data *dd = q->elevator->elevator_data;		\
  									\
  	spin_unlock(&dd->lock);						\
  }									\
  									\
  static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
  	.start	= deadline_##name##_fifo_start,				\
  	.next	= deadline_##name##_fifo_next,				\
  	.stop	= deadline_##name##_fifo_stop,				\
  	.show	= blk_mq_debugfs_rq_show,				\
  };									\
  									\
  static int deadline_##name##_next_rq_show(void *data,			\
  					  struct seq_file *m)		\
  {									\
  	struct request_queue *q = data;					\
  	struct deadline_data *dd = q->elevator->elevator_data;		\
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
  	struct request *rq = per_prio->next_rq[data_dir];		\
  									\
  	if (rq)								\
  		__blk_mq_debugfs_rq_show(m, rq);			\
  	return 0;							\
  }
  
  DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
  DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
  DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
  DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
  DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
  DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
  #undef DEADLINE_DEBUGFS_DDIR_ATTRS
  
  static int deadline_batching_show(void *data, struct seq_file *m)
  {
  	struct request_queue *q = data;
  	struct deadline_data *dd = q->elevator->elevator_data;
  
	seq_printf(m, "%u\n", dd->batching);
  	return 0;
  }
  
  static int deadline_starved_show(void *data, struct seq_file *m)
  {
  	struct request_queue *q = data;
  	struct deadline_data *dd = q->elevator->elevator_data;
  
	seq_printf(m, "%u\n", dd->starved);
  	return 0;
  }
  static int dd_async_depth_show(void *data, struct seq_file *m)
  {
  	struct request_queue *q = data;
  	struct deadline_data *dd = q->elevator->elevator_data;
  
	seq_printf(m, "%u\n", dd->async_depth);
  	return 0;
  }
  /* Number of requests queued for a given priority level. */
  static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
  {
  	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
  }
  static int dd_queued_show(void *data, struct seq_file *m)
  {
  	struct request_queue *q = data;
  	struct deadline_data *dd = q->elevator->elevator_data;
  
	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
  		   dd_queued(dd, DD_BE_PRIO),
  		   dd_queued(dd, DD_IDLE_PRIO));
  	return 0;
  }
  
  /* Number of requests owned by the block driver for a given priority. */
  static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
  {
  	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
  		- dd_sum(dd, completed, prio);
  }
  
  static int dd_owned_by_driver_show(void *data, struct seq_file *m)
  {
  	struct request_queue *q = data;
  	struct deadline_data *dd = q->elevator->elevator_data;
  
	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
  		   dd_owned_by_driver(dd, DD_BE_PRIO),
  		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
  	return 0;
  }
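
/*
 * Generate seq_file operations that dump the dispatch list of one priority
 * level (0 = RT, 1 = BE, 2 = IDLE) via debugfs.
 */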
  #define DEADLINE_DISPATCH_ATTR(prio)					\
  static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
  					     loff_t *pos)		\
  	__acquires(&dd->lock)						\
  {									\
  	struct request_queue *q = m->private;				\
  	struct deadline_data *dd = q->elevator->elevator_data;		\
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
  									\
  	spin_lock(&dd->lock);						\
  	return seq_list_start(&per_prio->dispatch, *pos);		\
  }									\
  									\
  static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
  					    void *v, loff_t *pos)	\
  {									\
  	struct request_queue *q = m->private;				\
  	struct deadline_data *dd = q->elevator->elevator_data;		\
  	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
  									\
  	return seq_list_next(v, &per_prio->dispatch, pos);		\
  }									\
  									\
  static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
  	__releases(&dd->lock)						\
  {									\
  	struct request_queue *q = m->private;				\
  	struct deadline_data *dd = q->elevator->elevator_data;		\
  									\
  	spin_unlock(&dd->lock);						\
  }									\
  									\
  static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
  	.start	= deadline_dispatch##prio##_start,			\
  	.next	= deadline_dispatch##prio##_next,			\
  	.stop	= deadline_dispatch##prio##_stop,			\
  	.show	= blk_mq_debugfs_rq_show,				\
  }
  DEADLINE_DISPATCH_ATTR(0);
  DEADLINE_DISPATCH_ATTR(1);
  DEADLINE_DISPATCH_ATTR(2);
  #undef DEADLINE_DISPATCH_ATTR

  #define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
  	{#name "_fifo_list", 0400,					\
  			.seq_ops = &deadline_##name##_fifo_seq_ops}
  #define DEADLINE_NEXT_RQ_ATTR(name)					\
  	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
  static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
  	DEADLINE_QUEUE_DDIR_ATTRS(read0),
  	DEADLINE_QUEUE_DDIR_ATTRS(write0),
  	DEADLINE_QUEUE_DDIR_ATTRS(read1),
  	DEADLINE_QUEUE_DDIR_ATTRS(write1),
  	DEADLINE_QUEUE_DDIR_ATTRS(read2),
  	DEADLINE_QUEUE_DDIR_ATTRS(write2),
  	DEADLINE_NEXT_RQ_ATTR(read0),
  	DEADLINE_NEXT_RQ_ATTR(write0),
  	DEADLINE_NEXT_RQ_ATTR(read1),
  	DEADLINE_NEXT_RQ_ATTR(write1),
  	DEADLINE_NEXT_RQ_ATTR(read2),
  	DEADLINE_NEXT_RQ_ATTR(write2),
  	{"batching", 0400, deadline_batching_show},
  	{"starved", 0400, deadline_starved_show},
  	{"async_depth", 0400, dd_async_depth_show},
  	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
  	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
  	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
  	{"owned_by_driver", 0400, dd_owned_by_driver_show},
  	{"queued", 0400, dd_queued_show},
  	{},
  };
  #undef DEADLINE_QUEUE_DDIR_ATTRS
  #endif
  static struct elevator_type mq_deadline = {
  	.ops = {
  		.depth_updated		= dd_depth_updated,
  		.limit_depth		= dd_limit_depth,
  		.insert_requests	= dd_insert_requests,
  		.dispatch_request	= dd_dispatch_request,
  		.prepare_request	= dd_prepare_request,
  		.finish_request		= dd_finish_request,
  		.next_request		= elv_rb_latter_request,
  		.former_request		= elv_rb_former_request,
  		.bio_merge		= dd_bio_merge,
  		.request_merge		= dd_request_merge,
  		.requests_merged	= dd_merged_requests,
  		.request_merged		= dd_request_merged,
  		.has_work		= dd_has_work,
  		.init_sched		= dd_init_sched,
  		.exit_sched		= dd_exit_sched,
  		.init_hctx		= dd_init_hctx,
  	},
  #ifdef CONFIG_BLK_DEBUG_FS
  	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
  #endif
  	.elevator_attrs = deadline_attrs,
  	.elevator_name = "mq-deadline",
  	.elevator_alias = "deadline",
  	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
  	.elevator_owner = THIS_MODULE,
  };
  MODULE_ALIAS("mq-deadline-iosched");
  
  static int __init deadline_init(void)
  {
  	return elv_register(&mq_deadline);
  }
  
  static void __exit deadline_exit(void)
  {
  	elv_unregister(&mq_deadline);
  }
  
  module_init(deadline_init);
  module_exit(deadline_exit);
  MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("MQ deadline IO scheduler");