block/blk-mq-sched.c

  /*
   * blk-mq scheduling framework
   *
   * Copyright (C) 2016 Jens Axboe
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/blk-mq.h>
  
  #include <trace/events/block.h>
  
  #include "blk.h"
  #include "blk-mq.h"
  #include "blk-mq-debugfs.h"
  #include "blk-mq-sched.h"
  #include "blk-mq-tag.h"
  #include "blk-wbt.h"
  
  void blk_mq_sched_free_hctx_data(struct request_queue *q,
  				 void (*exit)(struct blk_mq_hw_ctx *))
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i;
  
  	queue_for_each_hw_ctx(q, hctx, i) {
  		if (exit && hctx->sched_data)
  			exit(hctx);
  		kfree(hctx->sched_data);
  		hctx->sched_data = NULL;
  	}
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
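
  /*
   * Hedged usage sketch (illustration only, not part of the upstream file):
   * a scheduler that keeps private per-hctx state in hctx->sched_data can
   * tear it all down from its ->exit_sched() hook with the helper above.
   * struct example_sched_data and the example_* functions here and in the
   * later sketches are hypothetical, not an in-tree scheduler.
   */
  struct example_sched_data {
  	struct request_queue *q;	/* hypothetical back-pointer to the queue */
  	spinlock_t lock;		/* hypothetical lock used by the sketches below */
  };

  static void example_exit_hctx_data(struct blk_mq_hw_ctx *hctx)
  {
  	/*
  	 * Drop anything referenced by hctx->sched_data; the helper above
  	 * then kfree()s hctx->sched_data itself and clears the pointer.
  	 */
  }

  static void example_exit_sched(struct elevator_queue *e)
  {
  	struct example_sched_data *esd = e->elevator_data;

  	blk_mq_sched_free_hctx_data(esd->q, example_exit_hctx_data);
  	kfree(esd);
  }
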
  void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
  {
  	struct request_queue *q = rq->q;
  	struct io_context *ioc = rq_ioc(bio);
  	struct io_cq *icq;
  
  	spin_lock_irq(q->queue_lock);
  	icq = ioc_lookup_icq(ioc, q);
  	spin_unlock_irq(q->queue_lock);
  
  	if (!icq) {
  		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
  		if (!icq)
  			return;
  	}
  	get_io_context(icq->ioc);
  	rq->elv.icq = icq;
  }
  /*
   * Mark a hardware queue as needing a restart. For shared queues, maintain
   * a count of how many hardware queues are marked for restart.
   */
  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
  {
  	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
  		return;
  	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
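
  /*
   * Hedged usage sketch (illustration only, not part of the upstream file):
   * a scheduler's ->dispatch_request() hook that has work but chooses to
   * hold it back (e.g. because the resource the request needs is busy) can
   * mark the hctx for restart so blk-mq re-runs the queue later via
   * blk_mq_sched_restart() instead of polling.  example_next_rq(),
   * example_must_defer() and example_remove_rq() are hypothetical helpers;
   * struct example_sched_data is the hypothetical type from the sketch above.
   */
  static struct request *example_dispatch_request(struct blk_mq_hw_ctx *hctx)
  {
  	struct example_sched_data *esd = hctx->queue->elevator->elevator_data;
  	struct request *rq;

  	spin_lock(&esd->lock);
  	rq = example_next_rq(esd);		/* peek at the next candidate */
  	if (rq) {
  		if (example_must_defer(esd, rq)) {
  			/* not dispatchable now: ask blk-mq to come back later */
  			blk_mq_sched_mark_restart_hctx(hctx);
  			rq = NULL;
  		} else {
  			example_remove_rq(esd, rq);	/* take it off the internal list */
  		}
  	}
  	spin_unlock(&esd->lock);

  	return rq;
  }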

  void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
  {
  	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
  		return;
  	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

  	blk_mq_run_hw_queue(hctx, true);
  }
  /*
   * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
   * its queue by itself in its completion handler, so we don't need to
   * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
   */
  static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  	struct elevator_queue *e = q->elevator;
  	LIST_HEAD(rq_list);
  
  	do {
  		struct request *rq;

  		if (e->type->ops.mq.has_work &&
  				!e->type->ops.mq.has_work(hctx))
  			break;

  		if (!blk_mq_get_dispatch_budget(hctx))
  			break;
  
  		rq = e->type->ops.mq.dispatch_request(hctx);
  		if (!rq) {
  			blk_mq_put_dispatch_budget(hctx);
  			break;
  		}
  
  		/*
  		 * Now this rq owns the budget which has to be released
  		 * if this rq won't be queued to driver via .queue_rq()
  		 * in blk_mq_dispatch_rq_list().
  		 */
  		list_add(&rq->queuelist, &rq_list);
  	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
  }
  static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
  					  struct blk_mq_ctx *ctx)
  {
  	unsigned idx = ctx->index_hw;
  
  	if (++idx == hctx->nr_ctx)
  		idx = 0;
  
  	return hctx->ctxs[idx];
  }
  /*
   * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
   * its queue by itself in its completion handler, so we don't need to
   * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
   */
  static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  	LIST_HEAD(rq_list);
  	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
  
  	do {
  		struct request *rq;
  
  		if (!sbitmap_any_bit_set(&hctx->ctx_map))
  			break;
  		if (!blk_mq_get_dispatch_budget(hctx))
  			break;
  
  		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
  		if (!rq) {
  			blk_mq_put_dispatch_budget(hctx);
  			break;
  		}
  
  		/*
  		 * Now this rq owns the budget which has to be released
  		 * if this rq won't be queued to driver via .queue_rq()
  		 * in blk_mq_dispatch_rq_list().
  		 */
  		list_add(&rq->queuelist, &rq_list);
  
  		/* round robin for fair dispatch */
  		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
  
  	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
  
  	WRITE_ONCE(hctx->dispatch_from, ctx);
  }
  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  	struct elevator_queue *e = q->elevator;
  	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
  	LIST_HEAD(rq_list);
  	/* RCU or SRCU read lock is needed before checking quiesced flag */
  	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
  		return;
  
  	hctx->run++;
  
  	/*
  	 * If we have previous entries on our dispatch list, grab them first for
  	 * more fair dispatch.
  	 */
  	if (!list_empty_careful(&hctx->dispatch)) {
  		spin_lock(&hctx->lock);
  		if (!list_empty(&hctx->dispatch))
  			list_splice_init(&hctx->dispatch, &rq_list);
  		spin_unlock(&hctx->lock);
  	}
  
  	/*
  	 * Only ask the scheduler for requests, if we didn't have residual
  	 * requests from the dispatch list. This is to avoid the case where
  	 * we only ever dispatch a fraction of the requests available because
  	 * of low device queue depth. Once we pull requests out of the IO
  	 * scheduler, we can no longer merge or sort them. So it's best to
  	 * leave them there for as long as we can. Mark the hw queue as
  	 * needing a restart in that case.
  	 *
  	 * We want to dispatch from the scheduler if there was nothing
  	 * on the dispatch list or we were able to dispatch from the
  	 * dispatch list.
  	 */
  	if (!list_empty(&rq_list)) {
  		blk_mq_sched_mark_restart_hctx(hctx);
  		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
  			if (has_sched_dispatch)
  				blk_mq_do_dispatch_sched(hctx);
  			else
  				blk_mq_do_dispatch_ctx(hctx);
  		}
  	} else if (has_sched_dispatch) {
  		blk_mq_do_dispatch_sched(hctx);
  	} else if (hctx->dispatch_busy) {
  		/* dequeue requests one by one from the sw queue if the queue is busy */
  		blk_mq_do_dispatch_ctx(hctx);
  	} else {
  		blk_mq_flush_busy_ctxs(hctx, &rq_list);
  		blk_mq_dispatch_rq_list(q, &rq_list, false);
  	}
  }
  bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
  			    struct request **merged_request)
  {
  	struct request *rq;

  	switch (elv_merge(q, &rq, bio)) {
  	case ELEVATOR_BACK_MERGE:
  		if (!blk_mq_sched_allow_merge(q, rq, bio))
  			return false;
  		if (!bio_attempt_back_merge(q, rq, bio))
  			return false;
  		*merged_request = attempt_back_merge(q, rq);
  		if (!*merged_request)
  			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
  		return true;
  	case ELEVATOR_FRONT_MERGE:
  		if (!blk_mq_sched_allow_merge(q, rq, bio))
  			return false;
  		if (!bio_attempt_front_merge(q, rq, bio))
  			return false;
  		*merged_request = attempt_front_merge(q, rq);
  		if (!*merged_request)
  			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
  		return true;
  	case ELEVATOR_DISCARD_MERGE:
  		return bio_attempt_discard_merge(q, rq, bio);
  	default:
  		return false;
  	}
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
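
  /*
   * Hedged usage sketch (illustration only, not part of the upstream file):
   * a scheduler's ->bio_merge() hook typically takes its own lock and lets
   * blk_mq_sched_try_merge() do the work, then frees a request that became
   * redundant through the merge.  Modeled loosely on how mq-deadline uses
   * this helper; the example_* names are hypothetical.
   */
  static bool example_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
  {
  	struct request_queue *q = hctx->queue;
  	struct example_sched_data *esd = q->elevator->elevator_data;
  	struct request *free = NULL;
  	bool ret;

  	spin_lock(&esd->lock);
  	ret = blk_mq_sched_try_merge(q, bio, &free);
  	spin_unlock(&esd->lock);

  	/* a request emptied by attempt_{back,front}_merge() is handed back */
  	if (free)
  		blk_mq_free_request(free);

  	return ret;
  }
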
  /*
   * Iterate list of requests and see if we can merge this bio with any
   * of them.
   */
  bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
  			   struct bio *bio)
  {
  	struct request *rq;
  	int checked = 8;
  	list_for_each_entry_reverse(rq, list, queuelist) {
  		bool merged = false;
  
  		if (!checked--)
  			break;
  
  		if (!blk_rq_merge_ok(rq, bio))
  			continue;
  
  		switch (blk_try_merge(rq, bio)) {
  		case ELEVATOR_BACK_MERGE:
  			if (blk_mq_sched_allow_merge(q, rq, bio))
  				merged = bio_attempt_back_merge(q, rq, bio);
  			break;
  		case ELEVATOR_FRONT_MERGE:
  			if (blk_mq_sched_allow_merge(q, rq, bio))
  				merged = bio_attempt_front_merge(q, rq, bio);
  			break;
  		case ELEVATOR_DISCARD_MERGE:
  			merged = bio_attempt_discard_merge(q, rq, bio);
  			break;
  		default:
  			continue;
  		}
  		return merged;
  	}
  
  	return false;
  }
  EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
  
  /*
   * Reverse check our software queue for entries that we could potentially
   * merge with. Currently includes a hand-wavy stop count of 8, to not spend
   * too much time checking for merges.
   */
  static bool blk_mq_attempt_merge(struct request_queue *q,
  				 struct blk_mq_ctx *ctx, struct bio *bio)
  {
  	lockdep_assert_held(&ctx->lock);
  
  	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
  		ctx->rq_merged++;
  		return true;
  	}
  
  	return false;
  }

  bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
  {
  	struct elevator_queue *e = q->elevator;
  	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
  	bool ret = false;

  	if (e && e->type->ops.mq.bio_merge) {
  		blk_mq_put_ctx(ctx);
  		return e->type->ops.mq.bio_merge(hctx, bio);
  	}
  	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
  			!list_empty_careful(&ctx->rq_list)) {
  		/* default per sw-queue merge */
  		spin_lock(&ctx->lock);
  		ret = blk_mq_attempt_merge(q, ctx, bio);
  		spin_unlock(&ctx->lock);
  	}
  
  	blk_mq_put_ctx(ctx);
  	return ret;
  }
  
  bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
  {
  	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
  
  void blk_mq_sched_request_inserted(struct request *rq)
  {
  	trace_block_rq_insert(rq->q, rq);
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
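
  /*
   * Hedged usage sketch (illustration only, not part of the upstream file):
   * an ->insert_requests() hook usually gives each request one more merge
   * attempt via blk_mq_sched_try_insert_merge() and fires the insert
   * tracepoint through blk_mq_sched_request_inserted() before queueing it
   * internally.  example_enqueue() is a hypothetical helper; struct
   * example_sched_data is the hypothetical type from the first sketch.
   */
  static void example_insert_requests(struct blk_mq_hw_ctx *hctx,
  				    struct list_head *list, bool at_head)
  {
  	struct request_queue *q = hctx->queue;
  	struct example_sched_data *esd = q->elevator->elevator_data;

  	spin_lock(&esd->lock);
  	while (!list_empty(list)) {
  		struct request *rq;

  		rq = list_first_entry(list, struct request, queuelist);
  		list_del_init(&rq->queuelist);

  		/* merged into an already queued request: nothing left to insert */
  		if (blk_mq_sched_try_insert_merge(q, rq))
  			continue;

  		blk_mq_sched_request_inserted(rq);
  		example_enqueue(esd, rq, at_head);
  	}
  	spin_unlock(&esd->lock);
  }
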
  static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
  				       bool has_sched,
  				       struct request *rq)
  {
  	/* dispatch flush rq directly */
  	if (rq->rq_flags & RQF_FLUSH_SEQ) {
  		spin_lock(&hctx->lock);
  		list_add(&rq->queuelist, &hctx->dispatch);
  		spin_unlock(&hctx->lock);
  		return true;
  	}
  	if (has_sched)
  		rq->rq_flags |= RQF_SORTED;

  	return false;
  }

  void blk_mq_sched_insert_request(struct request *rq, bool at_head,
  				 bool run_queue, bool async)
  {
  	struct request_queue *q = rq->q;
  	struct elevator_queue *e = q->elevator;
  	struct blk_mq_ctx *ctx = rq->mq_ctx;
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
  	/* a flush rq in the flush machinery needs to be dispatched directly */
  	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
  		blk_insert_flush(rq);
  		goto run;
  	}
  	WARN_ON(e && (rq->tag != -1));
  	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
  		goto run;
  	if (e && e->type->ops.mq.insert_requests) {
  		LIST_HEAD(list);
  
  		list_add(&rq->queuelist, &list);
  		e->type->ops.mq.insert_requests(hctx, &list, at_head);
  	} else {
  		spin_lock(&ctx->lock);
  		__blk_mq_insert_request(hctx, rq, at_head);
  		spin_unlock(&ctx->lock);
  	}
  run:
  	if (run_queue)
  		blk_mq_run_hw_queue(hctx, async);
  }
  
  void blk_mq_sched_insert_requests(struct request_queue *q,
  				  struct blk_mq_ctx *ctx,
  				  struct list_head *list, bool run_queue_async)
  {
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
  	struct elevator_queue *e = hctx->queue->elevator;
  
  	if (e && e->type->ops.mq.insert_requests)
  		e->type->ops.mq.insert_requests(hctx, list, false);
  	else {
  		/*
  		 * Try to issue requests directly if the hw queue isn't busy
  		 * and there is no scheduler ('none'); this may save us an
  		 * extra enqueue & dequeue to the sw queue.
  		 */
  		if (!hctx->dispatch_busy && !e && !run_queue_async) {
  			blk_mq_try_issue_list_directly(hctx, list);
  			if (list_empty(list))
  				return;
  		}
  		blk_mq_insert_requests(hctx, ctx, list);
  	}
  
  	blk_mq_run_hw_queue(hctx, run_queue_async);
  }
  static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
  				   struct blk_mq_hw_ctx *hctx,
  				   unsigned int hctx_idx)
  {
  	if (hctx->sched_tags) {
  		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
  		blk_mq_free_rq_map(hctx->sched_tags);
  		hctx->sched_tags = NULL;
  	}
  }
  static int blk_mq_sched_alloc_tags(struct request_queue *q,
  				   struct blk_mq_hw_ctx *hctx,
  				   unsigned int hctx_idx)
  {
  	struct blk_mq_tag_set *set = q->tag_set;
  	int ret;
  
  	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
  					       set->reserved_tags);
  	if (!hctx->sched_tags)
  		return -ENOMEM;
  
  	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
  	if (ret)
  		blk_mq_sched_free_tags(set, hctx, hctx_idx);
  
  	return ret;
  }
  static void blk_mq_sched_tags_teardown(struct request_queue *q)
  {
  	struct blk_mq_tag_set *set = q->tag_set;
  	struct blk_mq_hw_ctx *hctx;
  	int i;
  
  	queue_for_each_hw_ctx(q, hctx, i)
  		blk_mq_sched_free_tags(set, hctx, i);
  }
  
  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
  {
  	struct blk_mq_hw_ctx *hctx;
  	struct elevator_queue *eq;
  	unsigned int i;
  	int ret;
  
  	if (!e) {
  		q->elevator = NULL;
  		q->nr_requests = q->tag_set->queue_depth;
  		return 0;
  	}
  
  	/*
  	 * Default to double of smaller one between hw queue_depth and 128,
  	 * since we don't split into sync/async like the old code did.
  	 * Additionally, this is a per-hw queue depth.
  	 */
  	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
  				   BLKDEV_MAX_RQ);

  	queue_for_each_hw_ctx(q, hctx, i) {
  		ret = blk_mq_sched_alloc_tags(q, hctx, i);
  		if (ret)
  			goto err;
  	}
  	ret = e->ops.mq.init_sched(q, e);
  	if (ret)
  		goto err;

  	blk_mq_debugfs_register_sched(q);
  
  	queue_for_each_hw_ctx(q, hctx, i) {
  		if (e->ops.mq.init_hctx) {
  			ret = e->ops.mq.init_hctx(hctx, i);
  			if (ret) {
  				eq = q->elevator;
  				blk_mq_exit_sched(q, eq);
  				kobject_put(&eq->kobj);
  				return ret;
  			}
  		}
  		blk_mq_debugfs_register_sched_hctx(q, hctx);
  	}
  	return 0;

  err:
  	blk_mq_sched_tags_teardown(q);
  	q->elevator = NULL;
  	return ret;
  }

  void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
  {
  	struct blk_mq_hw_ctx *hctx;
  	unsigned int i;
  	queue_for_each_hw_ctx(q, hctx, i) {
  		blk_mq_debugfs_unregister_sched_hctx(hctx);
  		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
  			e->type->ops.mq.exit_hctx(hctx, i);
  			hctx->sched_data = NULL;
  		}
  	}
  	blk_mq_debugfs_unregister_sched(q);
  	if (e->type->ops.mq.exit_sched)
  		e->type->ops.mq.exit_sched(e);
  	blk_mq_sched_tags_teardown(q);
  	q->elevator = NULL;
  }
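
  /*
   * Hedged registration sketch (illustration only, not part of the upstream
   * file): the example_* hooks from the sketches above would be wired into
   * blk-mq through a struct elevator_type with .uses_mq set, registered via
   * elv_register().  example_init_sched() below loosely mirrors how in-tree
   * blk-mq schedulers allocate their elevator_data; everything named
   * example_* remains hypothetical.
   */
  static int example_init_sched(struct request_queue *q, struct elevator_type *e)
  {
  	struct elevator_queue *eq;
  	struct example_sched_data *esd;

  	eq = elevator_alloc(q, e);
  	if (!eq)
  		return -ENOMEM;

  	esd = kzalloc_node(sizeof(*esd), GFP_KERNEL, q->node);
  	if (!esd) {
  		kobject_put(&eq->kobj);
  		return -ENOMEM;
  	}

  	esd->q = q;
  	spin_lock_init(&esd->lock);
  	eq->elevator_data = esd;
  	q->elevator = eq;
  	return 0;
  }

  static struct elevator_type example_sched = {
  	.ops.mq = {
  		.init_sched		= example_init_sched,
  		.exit_sched		= example_exit_sched,
  		.bio_merge		= example_bio_merge,
  		.insert_requests	= example_insert_requests,
  		.dispatch_request	= example_dispatch_request,
  	},
  	.uses_mq	= true,
  	.elevator_name	= "example-sched",
  	.elevator_owner	= THIS_MODULE,
  };

  static int __init example_sched_init(void)
  {
  	return elv_register(&example_sched);
  }

  static void __exit example_sched_exit(void)
  {
  	elv_unregister(&example_sched);
  }

  module_init(example_sched_init);
  module_exit(example_sched_exit);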