Blame view

block/blk-mq-tag.c 17 KB
3dcf60bcb   Christoph Hellwig   block: add SPDX t...
1
  // SPDX-License-Identifier: GPL-2.0
75bb4625b   Jens Axboe   blk-mq: add file ...
2
  /*
88459642c   Omar Sandoval   blk-mq: abstract ...
3
4
5
   * Tag allocation using scalable bitmaps. Uses active queue tracking to support
   * fairer distribution of tags between multiple submitters when a shared tag map
   * is used.
75bb4625b   Jens Axboe   blk-mq: add file ...
6
7
8
   *
   * Copyright (C) 2013-2014 Jens Axboe
   */
320ae51fe   Jens Axboe   blk-mq: new multi...
9
10
  #include <linux/kernel.h>
  #include <linux/module.h>
320ae51fe   Jens Axboe   blk-mq: new multi...
11
12
  
  #include <linux/blk-mq.h>
f9934a80f   Ming Lei   blk-mq: introduce...
13
  #include <linux/delay.h>
320ae51fe   Jens Axboe   blk-mq: new multi...
14
15
16
  #include "blk.h"
  #include "blk-mq.h"
  #include "blk-mq-tag.h"
0d2602ca3   Jens Axboe   blk-mq: improve s...
17
18
/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before try to allocate driver tag, then even if fail
 * to get tag when first time, the other shared-tag users could reserve
 * budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		/*
		 * Shared sbitmap: the active count lives in the tag set and
		 * the "active" mark lives in the queue flags, so all hctxs of
		 * this queue count as one user.
		 */
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		/*
		 * Plain test_bit() first so the common already-active case
		 * does not issue an atomic RMW; test_and_set_bit() then
		 * arbitrates the first activation.
		 */
		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			atomic_inc(&set->active_queues_shared_sbitmap);
	} else {
		/* Per-hctx tags: mark this hctx active and count it once. */
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			atomic_inc(&hctx->tags->active_queues);
	}

	return true;
}
  
  /*
aed3ea94b   Jens Axboe   block: wake up wa...
42
   * Wakeup all potentially sleeping on tags
0d2602ca3   Jens Axboe   blk-mq: improve s...
43
   */
aed3ea94b   Jens Axboe   block: wake up wa...
44
  void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
0d2602ca3   Jens Axboe   blk-mq: improve s...
45
  {
222a5ae03   John Garry   blk-mq: Use point...
46
  	sbitmap_queue_wake_all(tags->bitmap_tags);
88459642c   Omar Sandoval   blk-mq: abstract ...
47
  	if (include_reserve)
222a5ae03   John Garry   blk-mq: Use point...
48
  		sbitmap_queue_wake_all(tags->breserved_tags);
0d2602ca3   Jens Axboe   blk-mq: improve s...
49
50
51
  }
  
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		/*
		 * test_and_clear_bit() ensures only the task that actually
		 * clears the active flag decrements the shared counter.
		 */
		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
		atomic_dec(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
		atomic_dec(&tags->active_queues);
	}

	/* One user fewer: waiters may now be within their fair share. */
	blk_mq_tag_wakeup_all(tags, false);
}
200e86b33   Jens Axboe   blk-mq: only appl...
74
75
/*
 * Try one non-blocking tag allocation from @bt.
 *
 * Returns the bit number on success, or BLK_MQ_NO_TAG when no tag is
 * available or when fairness (hctx_may_queue()) forbids this hctx from
 * taking more tags. Reserved allocations and queues with an elevator
 * bypass the fairness check.
 */
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	/* shallow_depth caps how deep this allocation may dip into @bt. */
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}
4941115be   Jens Axboe   blk-mq-tag: clean...
86
/**
 * blk_mq_get_tag - allocate a tag for a request
 * @data:	allocation context: queue, hctx, ctx, flags, shallow depth.
 *
 * Picks the reserved or the normal bitmap based on BLK_MQ_REQ_RESERVED,
 * then tries a fast non-blocking allocation. If that fails and the caller
 * allows sleeping, kicks the hardware queue and waits on the sbitmap until
 * a tag frees up. Returns the tag offset into the combined namespace
 * (normal tags are shifted up by nr_reserved_tags), or BLK_MQ_NO_TAG.
 */
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		/* Caller asked for a reserved tag but none were configured. */
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		/* Re-check after queueing on the waitqueue to avoid a lost
		 * wakeup between the failed try and going to sleep. */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		/* We may have migrated CPUs; remap ctx/hctx and re-derive
		 * the tag map for the (possibly new) hardware queue. */
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = tags->breserved_tags;
		else
			bt = tags->bitmap_tags;

		/*
		 * If destination hw queue is changed, fake wake up on
		 * previous queue for compensating the wake up miss, so
		 * other allocations on previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
cae740a04   John Garry   blk-mq: Remove so...
169
170
  void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
  		    unsigned int tag)
320ae51fe   Jens Axboe   blk-mq: new multi...
171
  {
415b806de   Sagi Grimberg   blk-mq-sched: All...
172
  	if (!blk_mq_tag_is_reserved(tags, tag)) {
4bb659b15   Jens Axboe   blk-mq: implement...
173
  		const int real_tag = tag - tags->nr_reserved_tags;
70114c393   Jens Axboe   blk-mq: cleanup t...
174
  		BUG_ON(real_tag >= tags->nr_tags);
222a5ae03   John Garry   blk-mq: Use point...
175
  		sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
70114c393   Jens Axboe   blk-mq: cleanup t...
176
177
  	} else {
  		BUG_ON(tag >= tags->nr_reserved_tags);
222a5ae03   John Garry   blk-mq: Use point...
178
  		sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
70114c393   Jens Axboe   blk-mq: cleanup t...
179
  	}
320ae51fe   Jens Axboe   blk-mq: new multi...
180
  }
88459642c   Omar Sandoval   blk-mq: abstract ...
181
182
183
184
185
186
187
188
/* Context passed through sbitmap_for_each_set() to bt_iter(). */
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;	/* hw queue being iterated */
	busy_iter_fn *fn;		/* caller callback */
	void *data;			/* caller cookie */
	bool reserved;			/* iterating the reserved bitmap? */
};

/*
 * Per-bit callback: map the bit to a request and forward it to the
 * caller's function if it actually belongs to this hctx's queue.
 * Returns true to keep iterating, or the callback's own verdict.
 */
static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	/* Normal bitmap bits sit above the reserved range in ->rqs[]. */
	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}
4bb659b15   Jens Axboe   blk-mq: implement...
208

c7b1bf5cc   Bart Van Assche   blk-mq: Document ...
209
210
211
212
213
214
215
216
/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	/* Bundle the caller's context for the per-bit bt_iter() callback. */
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
88459642c   Omar Sandoval   blk-mq: abstract ...
235
236
237
238
/* Context passed through sbitmap_for_each_set() to bt_tags_iter(). */
struct bt_tags_iter_data {
	struct blk_mq_tags *tags;	/* tag map being iterated */
	busy_tag_iter_fn *fn;		/* caller callback */
	void *data;			/* caller cookie */
	unsigned int flags;		/* BT_TAG_ITER_* below */
};

#define BT_TAG_ITER_RESERVED		(1 << 0)	/* walking breserved_tags */
#define BT_TAG_ITER_STARTED		(1 << 1)	/* only started requests */
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)	/* read ->static_rqs[], not ->rqs[] */

/*
 * Per-bit callback: resolve the bit to a request (from static_rqs or rqs
 * depending on flags), apply the STARTED filter, and forward to the
 * caller's function. Returns true to keep iterating.
 */
static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;

	/* Normal bitmap bits sit above the reserved range in the arrays. */
	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
		rq = tags->static_rqs[bitnr];
	else
		rq = tags->rqs[bitnr];
	if (!rq)
		return true;
	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
	    !blk_mq_request_started(rq))
		return true;
	return iter_data->fn(rq, iter_data->data, reserved);
}
c7b1bf5cc   Bart Van Assche   blk-mq: Document ...
270
271
272
273
274
275
276
/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	/* ->rqs may not be allocated yet; nothing to iterate in that case. */
	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
602380d28   Ming Lei   blk-mq: add blk_m...
295
296
297
298
299
300
/*
 * Walk both bitmaps of @tags (reserved first, when present) with @fn.
 * @flags must not already contain BT_TAG_ITER_RESERVED; it is added
 * internally for the reserved pass.
 */
static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
}
c7b1bf5cc   Bart Van Assche   blk-mq: Document ...
305
/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	/* STATIC_RQS: walk ->static_rqs[], i.e. every allocated request. */
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}
f26cdc853   Keith Busch   blk-mq: Shared ta...
322

c7b1bf5cc   Bart Van Assche   blk-mq: Document ...
323
324
325
326
327
328
/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	/* Walk every hardware queue's tag map that has been set up. */
	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
f9934a80f   Ming Lei   blk-mq: introduce...
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
  static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
  		void *data, bool reserved)
  {
  	unsigned *count = data;
  
  	if (blk_mq_request_completed(rq))
  		(*count)++;
  	return true;
  }
  
/**
 * blk_mq_tagset_wait_completed_request - wait until all completed req's
 * complete function is run
 * @tagset:	Tag set to drain completed request
 *
 * Note: This function has to be run after all IO queues are shutdown
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	/* Poll until no in-flight request is still marked completed. */
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
c7b1bf5cc   Bart Van Assche   blk-mq: Document ...
375
376
377
378
379
380
381
382
383
384
385
386
387
388
/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
	}
	/* Drop the usage reference taken by percpu_ref_tryget() above. */
	blk_queue_exit(q);
}
f4a644db8   Omar Sandoval   sbitmap: push all...
418
419
/*
 * Thin wrapper around sbitmap_queue_init_node() for tag bitmaps.
 * NOTE(review): the -1 appears to ask sbitmap for its default shift —
 * confirm against the sbitmap_queue_init_node() documentation.
 */
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}
4d063237b   Hannes Reinecke   blk-mq: Free tags...
424
425
/*
 * Allocate the per-tag-map bitmaps (normal + reserved) embedded in @tags
 * and point the indirection members at them. Returns 0 or -ENOMEM; on
 * failure the partially-allocated normal bitmap is freed.
 */
static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
				   int node, int alloc_policy)
{
	/* Normal bitmap covers the tags above the reserved range. */
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	tags->bitmap_tags = &tags->__bitmap_tags;
	tags->breserved_tags = &tags->__breserved_tags;
	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&tags->__bitmap_tags);
	return -ENOMEM;
}
32bc15afe   John Garry   blk-mq: Facilitat...
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
/*
 * Allocate one pair of bitmaps in the tag set and point every hardware
 * queue's tag map at them, so all hctxs share a single tag space.
 * Returns 0 or -ENOMEM.
 */
int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
{
	unsigned int depth = set->queue_depth - set->reserved_tags;
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
	int i, node = set->numa_node;

	if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&set->__breserved_tags, set->reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	/* Redirect every hctx's tag map to the set-wide bitmaps. */
	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		tags->bitmap_tags = &set->__bitmap_tags;
		tags->breserved_tags = &set->__breserved_tags;
	}

	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&set->__bitmap_tags);
	return -ENOMEM;
}
  
/* Free the set-wide bitmaps created by blk_mq_init_shared_sbitmap(). */
void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
{
	sbitmap_queue_free(&set->__bitmap_tags);
	sbitmap_queue_free(&set->__breserved_tags);
}
320ae51fe   Jens Axboe   blk-mq: new multi...
474
  struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
24391c0dc   Shaohua Li   blk-mq: add tag a...
475
  				     unsigned int reserved_tags,
1c0706a70   John Garry   blk-mq: Pass flag...
476
  				     int node, unsigned int flags)
320ae51fe   Jens Axboe   blk-mq: new multi...
477
  {
1c0706a70   John Garry   blk-mq: Pass flag...
478
  	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
320ae51fe   Jens Axboe   blk-mq: new multi...
479
  	struct blk_mq_tags *tags;
320ae51fe   Jens Axboe   blk-mq: new multi...
480
481
482
483
484
485
486
487
488
489
  
  	if (total_tags > BLK_MQ_TAG_MAX) {
  		pr_err("blk-mq: tag depth too large
  ");
  		return NULL;
  	}
  
  	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
  	if (!tags)
  		return NULL;
320ae51fe   Jens Axboe   blk-mq: new multi...
490
491
  	tags->nr_tags = total_tags;
  	tags->nr_reserved_tags = reserved_tags;
320ae51fe   Jens Axboe   blk-mq: new multi...
492

32bc15afe   John Garry   blk-mq: Facilitat...
493
494
  	if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
  		return tags;
4d063237b   Hannes Reinecke   blk-mq: Free tags...
495
496
497
498
499
  	if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
  		kfree(tags);
  		return NULL;
  	}
  	return tags;
320ae51fe   Jens Axboe   blk-mq: new multi...
500
  }
1c0706a70   John Garry   blk-mq: Pass flag...
501
  void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
320ae51fe   Jens Axboe   blk-mq: new multi...
502
  {
32bc15afe   John Garry   blk-mq: Facilitat...
503
504
505
506
  	if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
  		sbitmap_queue_free(tags->bitmap_tags);
  		sbitmap_queue_free(tags->breserved_tags);
  	}
320ae51fe   Jens Axboe   blk-mq: new multi...
507
508
  	kfree(tags);
  }
70f36b600   Jens Axboe   blk-mq: allow res...
509
510
511
/*
 * Change the depth of *@tagsptr to @tdepth.
 *
 * Shrinking (or growing within the existing allocation) just resizes the
 * normal bitmap. Growing beyond nr_tags is only allowed when @can_grow is
 * set: a whole new rq map is allocated, populated, and swapped in before
 * the old one is freed. Returns 0, -EINVAL or -ENOMEM.
 */
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	/* Depth must leave room for at least one normal tag. */
	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		/* Only sched tags can grow, so clear HCTX_SHARED flag  */
		unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags, flags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new, flags);
			return -ENOMEM;
		}

		/* New map is fully set up; tear down and replace the old. */
		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr, flags);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}
32bc15afe   John Garry   blk-mq: Facilitat...
562
563
564
565
/* Resize the set-wide normal bitmap; reserved tags stay fixed. */
void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
{
	sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
}
205fb5f5b   Bart Van Assche   blk-mq: add blk_m...
566
567
568
569
570
571
572
573
574
575
576
577
578
579
/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);