block/blk-mq-sched.c

  /*
   * blk-mq scheduling framework
   *
   * Copyright (C) 2016 Jens Axboe
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/blk-mq.h>
  
  #include <trace/events/block.h>
  
  #include "blk.h"
  #include "blk-mq.h"
  #include "blk-mq-debugfs.h"
  #include "blk-mq-sched.h"
  #include "blk-mq-tag.h"
  #include "blk-wbt.h"
  
  void blk_mq_sched_free_hctx_data(struct request_queue *q,
  				 void (*exit)(struct blk_mq_hw_ctx *))
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i;
  
  	queue_for_each_hw_ctx(q, hctx, i) {
  		if (exit && hctx->sched_data)
  			exit(hctx);
  		kfree(hctx->sched_data);
  		hctx->sched_data = NULL;
  	}
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
  void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
  {
  	struct request_queue *q = rq->q;
  	struct io_context *ioc = rq_ioc(bio);
  	struct io_cq *icq;
  
  	spin_lock_irq(q->queue_lock);
  	icq = ioc_lookup_icq(ioc, q);
  	spin_unlock_irq(q->queue_lock);
  
  	if (!icq) {
  		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
  		if (!icq)
  			return;
  	}
  	get_io_context(icq->ioc);
  	rq->elv.icq = icq;
  }
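
/*
 * Note (added, not part of the original file): if ioc_create_icq() fails,
 * the function above returns early and the request is simply left without
 * an io_cq attached; only on success does it take a reference on the
 * io_context and set rq->elv.icq.
 */
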
  /*
   * Mark a hardware queue as needing a restart. For shared queues, maintain
   * a count of how many hardware queues are marked for restart.
   */
  static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
  {
  	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
  		return;
  
  	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
  		struct request_queue *q = hctx->queue;
  
  		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
  			atomic_inc(&q->shared_hctx_restart);
  	} else
  		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
  }
  
  static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
  {
  	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
  		return false;
  
  	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
  		struct request_queue *q = hctx->queue;
  
  		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
  			atomic_dec(&q->shared_hctx_restart);
  	} else
  		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
  
  	if (blk_mq_hctx_has_pending(hctx)) {
  		blk_mq_run_hw_queue(hctx, true);
  		return true;
  	}
  
  	return false;
  }
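
/*
 * Illustrative pairing sketch (added, not from the original file): a
 * dispatch path that still has residual requests marks the hardware queue,
 * e.g. blk_mq_sched_dispatch_requests() below does
 *
 *	blk_mq_sched_mark_restart_hctx(hctx);
 *
 * and once a driver tag is freed, blk_mq_sched_restart() further down calls
 * blk_mq_sched_restart_hctx() to clear the bit and re-run the queue if it
 * still has pending work.
 */
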
  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
  {
  	struct request_queue *q = hctx->queue;
  	struct elevator_queue *e = q->elevator;
  	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
  	bool do_sched_dispatch = true;
  	LIST_HEAD(rq_list);
  	/* RCU or SRCU read lock is needed before checking quiesced flag */
  	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
  		return;
  
  	hctx->run++;
  
  	/*
  	 * If we have previous entries on our dispatch list, grab them first for
  	 * more fair dispatch.
  	 */
  	if (!list_empty_careful(&hctx->dispatch)) {
  		spin_lock(&hctx->lock);
  		if (!list_empty(&hctx->dispatch))
  			list_splice_init(&hctx->dispatch, &rq_list);
  		spin_unlock(&hctx->lock);
  	}
  
  	/*
  	 * Only ask the scheduler for requests, if we didn't have residual
  	 * requests from the dispatch list. This is to avoid the case where
  	 * we only ever dispatch a fraction of the requests available because
  	 * of low device queue depth. Once we pull requests out of the IO
  	 * scheduler, we can no longer merge or sort them. So it's best to
  	 * leave them there for as long as we can. Mark the hw queue as
  	 * needing a restart in that case.
  	 */
  	if (!list_empty(&rq_list)) {
  		blk_mq_sched_mark_restart_hctx(hctx);
  		do_sched_dispatch = blk_mq_dispatch_rq_list(q, &rq_list);
  	} else if (!has_sched_dispatch) {
  		blk_mq_flush_busy_ctxs(hctx, &rq_list);
  		blk_mq_dispatch_rq_list(q, &rq_list);
  	}
  
  	/*
  	 * We want to dispatch from the scheduler if there was nothing
  	 * on the dispatch list or we were able to dispatch from the
  	 * dispatch list.
  	 */
  	if (do_sched_dispatch && has_sched_dispatch) {
  		do {
  			struct request *rq;
  
  			rq = e->type->ops.mq.dispatch_request(hctx);
  			if (!rq)
  				break;
  			list_add(&rq->queuelist, &rq_list);
  		} while (blk_mq_dispatch_rq_list(q, &rq_list));
  	}
  }
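
/*
 * Summary of the dispatch policy above (added for clarity, not part of the
 * original file):
 *
 *	hctx->dispatch non-empty   -> flush those requests first; consult the
 *				      scheduler afterwards only if the driver
 *				      accepted them
 *	no ->dispatch_request op   -> drain the software queues via
 *				      blk_mq_flush_busy_ctxs() and dispatch
 *	otherwise                  -> pull requests one at a time from
 *				      e->type->ops.mq.dispatch_request() until
 *				      the scheduler or the driver runs dry
 */
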
  bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
  			    struct request **merged_request)
  {
  	struct request *rq;

  	switch (elv_merge(q, &rq, bio)) {
  	case ELEVATOR_BACK_MERGE:
  		if (!blk_mq_sched_allow_merge(q, rq, bio))
  			return false;
  		if (!bio_attempt_back_merge(q, rq, bio))
  			return false;
  		*merged_request = attempt_back_merge(q, rq);
  		if (!*merged_request)
  			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
  		return true;
  	case ELEVATOR_FRONT_MERGE:
  		if (!blk_mq_sched_allow_merge(q, rq, bio))
  			return false;
  		if (!bio_attempt_front_merge(q, rq, bio))
  			return false;
  		*merged_request = attempt_front_merge(q, rq);
  		if (!*merged_request)
  			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
  		return true;
  	default:
  		return false;
  	}
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
  /*
   * Reverse check our software queue for entries that we could potentially
   * merge with. Currently includes a hand-wavy stop count of 8, to not spend
   * too much time checking for merges.
   */
  static bool blk_mq_attempt_merge(struct request_queue *q,
  				 struct blk_mq_ctx *ctx, struct bio *bio)
  {
  	struct request *rq;
  	int checked = 8;
  	lockdep_assert_held(&ctx->lock);
  	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
  		bool merged = false;
  
  		if (!checked--)
  			break;
  
  		if (!blk_rq_merge_ok(rq, bio))
  			continue;
  
  		switch (blk_try_merge(rq, bio)) {
  		case ELEVATOR_BACK_MERGE:
  			if (blk_mq_sched_allow_merge(q, rq, bio))
  				merged = bio_attempt_back_merge(q, rq, bio);
  			break;
  		case ELEVATOR_FRONT_MERGE:
  			if (blk_mq_sched_allow_merge(q, rq, bio))
  				merged = bio_attempt_front_merge(q, rq, bio);
  			break;
  		case ELEVATOR_DISCARD_MERGE:
  			merged = bio_attempt_discard_merge(q, rq, bio);
  			break;
  		default:
  			continue;
  		}
  
  		if (merged)
  			ctx->rq_merged++;
  		return merged;
  	}
  
  	return false;
  }
  bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
  {
  	struct elevator_queue *e = q->elevator;
  	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
  	bool ret = false;

  	if (e && e->type->ops.mq.bio_merge) {
  		blk_mq_put_ctx(ctx);
  		return e->type->ops.mq.bio_merge(hctx, bio);
  	}
  	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
  			!list_empty_careful(&ctx->rq_list)) {
  		/* default per sw-queue merge */
  		spin_lock(&ctx->lock);
  		ret = blk_mq_attempt_merge(q, ctx, bio);
  		spin_unlock(&ctx->lock);
  	}
  
  	blk_mq_put_ctx(ctx);
  	return ret;
  }
  
  bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
  {
  	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
  
  void blk_mq_sched_request_inserted(struct request *rq)
  {
  	trace_block_rq_insert(rq->q, rq);
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
  static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
  				       struct request *rq)
  {
  	if (rq->tag == -1) {
  		rq->rq_flags |= RQF_SORTED;
  		return false;
  	}
  
  	/*
  	 * If we already have a real request tag, send directly to
  	 * the dispatch list.
  	 */
  	spin_lock(&hctx->lock);
  	list_add(&rq->queuelist, &hctx->dispatch);
  	spin_unlock(&hctx->lock);
  	return true;
  }

  /**
   * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
   * @pos:    loop cursor.
   * @skip:   the list element that will not be examined. Iteration starts at
   *          @skip->next.
   * @head:   head of the list to examine. This list must have at least one
   *          element, namely @skip.
   * @member: name of the list_head structure within typeof(*pos).
   */
  #define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
  	for ((pos) = (skip);						\
  	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
  			(pos)->member.next, typeof(*pos), member) :	\
  	      list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
  	     (pos) != (skip); )
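
/*
 * Illustrative usage sketch for the macro above (added; @this_q and @q are
 * assumed names, not from the original file). With @this_q on
 * set->tag_list, every *other* queue sharing the tag set is visited once,
 * starting at this_q->next and wrapping past the list head:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu_rr(q, this_q, &set->tag_list, tag_set_list) {
 *		... use @q here; @this_q itself is never visited ...
 *	}
 *	rcu_read_unlock();
 *
 * blk_mq_sched_restart() below is the real user in this file.
 */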

  /*
   * Called after a driver tag has been freed to check whether a hctx needs to
   * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
   * queues in a round-robin fashion if the tag set of @hctx is shared with other
   * hardware queues.
   */
  void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
  {
  	struct blk_mq_tags *const tags = hctx->tags;
  	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
  	struct request_queue *const queue = hctx->queue, *q;
  	struct blk_mq_hw_ctx *hctx2;
  	unsigned int i, j;
  
  	if (set->flags & BLK_MQ_F_TAG_SHARED) {
  		/*
  		 * If this is 0, then we know that no hardware queues
  		 * have RESTART marked. We're done.
  		 */
  		if (!atomic_read(&queue->shared_hctx_restart))
  			return;
  		rcu_read_lock();
  		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
  					   tag_set_list) {
  			queue_for_each_hw_ctx(q, hctx2, i)
  				if (hctx2->tags == tags &&
  				    blk_mq_sched_restart_hctx(hctx2))
  					goto done;
  		}
  		j = hctx->queue_num + 1;
  		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
  			if (j == queue->nr_hw_queues)
  				j = 0;
  			hctx2 = queue->queue_hw_ctx[j];
  			if (hctx2->tags == tags &&
  			    blk_mq_sched_restart_hctx(hctx2))
  				break;
  		}
  done:
  		rcu_read_unlock();
  	} else {
  		blk_mq_sched_restart_hctx(hctx);
  	}
  }
  /*
   * Add flush/fua to the queue. If we fail getting a driver tag, then
   * punt to the requeue list. Requeue will re-invoke us from a context
   * that's safe to block from.
   */
  static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
  				      struct request *rq, bool can_block)
  {
  	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
  		blk_insert_flush(rq);
  		blk_mq_run_hw_queue(hctx, true);
  	} else
  		blk_mq_add_to_requeue_list(rq, false, true);
  }
  
  void blk_mq_sched_insert_request(struct request *rq, bool at_head,
  				 bool run_queue, bool async, bool can_block)
  {
  	struct request_queue *q = rq->q;
  	struct elevator_queue *e = q->elevator;
  	struct blk_mq_ctx *ctx = rq->mq_ctx;
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
  	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
  		blk_mq_sched_insert_flush(hctx, rq, can_block);
  		return;
  	}
  	if (e && blk_mq_sched_bypass_insert(hctx, rq))
  		goto run;
  	if (e && e->type->ops.mq.insert_requests) {
  		LIST_HEAD(list);
  
  		list_add(&rq->queuelist, &list);
  		e->type->ops.mq.insert_requests(hctx, &list, at_head);
  	} else {
  		spin_lock(&ctx->lock);
  		__blk_mq_insert_request(hctx, rq, at_head);
  		spin_unlock(&ctx->lock);
  	}
  run:
  	if (run_queue)
  		blk_mq_run_hw_queue(hctx, async);
  }
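
/*
 * Summary of the insert policy above (added for clarity, not part of the
 * original file):
 *
 *	flush/fua without a driver tag       -> blk_mq_sched_insert_flush()
 *	elevator set and request already
 *	holds a driver tag                   -> bypass straight to
 *						hctx->dispatch
 *	elevator provides ->insert_requests  -> hand it to the scheduler
 *	otherwise                            -> plain software-queue insert
 *						under ctx->lock
 *
 * In the non-flush cases the hardware queue is then run if @run_queue is
 * set.
 */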
  
  void blk_mq_sched_insert_requests(struct request_queue *q,
  				  struct blk_mq_ctx *ctx,
  				  struct list_head *list, bool run_queue_async)
  {
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
  	struct elevator_queue *e = hctx->queue->elevator;
  	if (e) {
  		struct request *rq, *next;
  
  		/*
  		 * We bypass requests that already have a driver tag assigned,
  		 * which should only be flushes. Flushes are only ever inserted
  		 * as single requests, so we shouldn't ever hit the
  		 * WARN_ON_ONCE() below (but let's handle it just in case).
  		 */
  		list_for_each_entry_safe(rq, next, list, queuelist) {
  			if (WARN_ON_ONCE(rq->tag != -1)) {
  				list_del_init(&rq->queuelist);
  				blk_mq_sched_bypass_insert(hctx, rq);
  			}
  		}
  	}
  	if (e && e->type->ops.mq.insert_requests)
  		e->type->ops.mq.insert_requests(hctx, list, false);
  	else
  		blk_mq_insert_requests(hctx, ctx, list);
  
  	blk_mq_run_hw_queue(hctx, run_queue_async);
  }
  static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
  				   struct blk_mq_hw_ctx *hctx,
  				   unsigned int hctx_idx)
  {
  	if (hctx->sched_tags) {
  		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
  		blk_mq_free_rq_map(hctx->sched_tags);
  		hctx->sched_tags = NULL;
  	}
  }
  static int blk_mq_sched_alloc_tags(struct request_queue *q,
  				   struct blk_mq_hw_ctx *hctx,
  				   unsigned int hctx_idx)
  {
  	struct blk_mq_tag_set *set = q->tag_set;
  	int ret;
  
  	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
  					       set->reserved_tags);
  	if (!hctx->sched_tags)
  		return -ENOMEM;
  
  	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
  	if (ret)
  		blk_mq_sched_free_tags(set, hctx, hctx_idx);
  
  	return ret;
  }
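
/*
 * Note (added, not part of the original file): each hardware queue gets its
 * own sched_tags map, sized from q->nr_requests and set->reserved_tags.
 * blk_mq_sched_tags_teardown() below releases these maps for all hardware
 * queues, while per-hctx error paths call blk_mq_sched_free_tags()
 * directly.
 */
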
  static void blk_mq_sched_tags_teardown(struct request_queue *q)
  {
  	struct blk_mq_tag_set *set = q->tag_set;
  	struct blk_mq_hw_ctx *hctx;
  	int i;
  
  	queue_for_each_hw_ctx(q, hctx, i)
  		blk_mq_sched_free_tags(set, hctx, i);
  }
  int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
  			   unsigned int hctx_idx)
  {
  	struct elevator_queue *e = q->elevator;
  	int ret;
  
  	if (!e)
  		return 0;
  	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
  	if (ret)
  		return ret;
  
  	if (e->type->ops.mq.init_hctx) {
  		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
  		if (ret) {
  			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
  			return ret;
  		}
  	}
  	blk_mq_debugfs_register_sched_hctx(q, hctx);
  	return 0;
  }
  
  void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
  			    unsigned int hctx_idx)
  {
  	struct elevator_queue *e = q->elevator;
  
  	if (!e)
  		return;
  	blk_mq_debugfs_unregister_sched_hctx(hctx);
  	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
  		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
  		hctx->sched_data = NULL;
  	}
  	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
  }
  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
  {
  	struct blk_mq_hw_ctx *hctx;
  	struct elevator_queue *eq;
  	unsigned int i;
  	int ret;
  
  	if (!e) {
  		q->elevator = NULL;
  		return 0;
  	}
  
  	/*
  	 * Default to double of smaller one between hw queue_depth and 128,
  	 * since we don't split into sync/async like the old code did.
  	 * Additionally, this is a per-hw queue depth.
  	 */
  	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
  				   BLKDEV_MAX_RQ);
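	/*
	 * Worked example (added): assuming BLKDEV_MAX_RQ is 128, a device
	 * with a hardware queue depth of 32 gets 2 * min(32, 128) = 64
	 * scheduler requests per hardware queue, while a very deep device
	 * (e.g. queue_depth 1024) is capped at 2 * 128 = 256.
	 */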

  	queue_for_each_hw_ctx(q, hctx, i) {
  		ret = blk_mq_sched_alloc_tags(q, hctx, i);
  		if (ret)
  			goto err;
  	}
  	ret = e->ops.mq.init_sched(q, e);
  	if (ret)
  		goto err;

  	blk_mq_debugfs_register_sched(q);
  
  	queue_for_each_hw_ctx(q, hctx, i) {
  		if (e->ops.mq.init_hctx) {
  			ret = e->ops.mq.init_hctx(hctx, i);
  			if (ret) {
  				eq = q->elevator;
  				blk_mq_exit_sched(q, eq);
  				kobject_put(&eq->kobj);
  				return ret;
  			}
  		}
  		blk_mq_debugfs_register_sched_hctx(q, hctx);
  	}
  	return 0;

  err:
  	blk_mq_sched_tags_teardown(q);
  	q->elevator = NULL;
  	return ret;
  }

  void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
  {
  	struct blk_mq_hw_ctx *hctx;
  	unsigned int i;
  	queue_for_each_hw_ctx(q, hctx, i) {
  		blk_mq_debugfs_unregister_sched_hctx(hctx);
  		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
  			e->type->ops.mq.exit_hctx(hctx, i);
  			hctx->sched_data = NULL;
  		}
  	}
  	blk_mq_debugfs_unregister_sched(q);
  	if (e->type->ops.mq.exit_sched)
  		e->type->ops.mq.exit_sched(e);
  	blk_mq_sched_tags_teardown(q);
  	q->elevator = NULL;
  }
  int blk_mq_sched_init(struct request_queue *q)
  {
  	int ret;
  	mutex_lock(&q->sysfs_lock);
  	ret = elevator_init(q, NULL);
  	mutex_unlock(&q->sysfs_lock);
  
  	return ret;
  }