block/blk-mq-tag.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * still reserve budget for it (the count is consumed by hctx_may_queue()
 * to cap each active queue's share of a shared tag map).
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
  
/*
 * Wake up all waiters potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}
  
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}
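
/*
 * Attempt a single, non-blocking tag allocation from @bt. Honours the
 * shallow-depth limit an I/O scheduler may have set and, when no elevator
 * is attached, the per-queue fairness cap from hctx_may_queue() for shared
 * tag maps.
 */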
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}
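
/*
 * Allocate a tag for the request described by @data. May sleep unless
 * BLK_MQ_REQ_NOWAIT is set. Returns a tag offset into the whole tag space
 * (reserved tags first, then normal tags), or BLK_MQ_NO_TAG on failure.
 */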
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wakeup on the previous queue to compensate for the missed
		 * wakeup, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
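
/*
 * Release a tag previously returned by blk_mq_get_tag(). @tag is the
 * queue-wide value, i.e. it still includes the reserved-tag offset.
 */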
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}
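
/* Iteration context handed to bt_iter() by bt_for_each(). */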
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
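
/* Iteration context handed to bt_tags_iter(); flags are BT_TAG_ITER_*. */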
struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
		rq = tags->static_rqs[bitnr];
	else
		rq = tags->rqs[bitnr];
	if (!rq)
		return true;
	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
	    !blk_mq_request_started(rq))
		return true;
	return iter_data->fn(rq, iter_data->data, reserved);
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
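
/*
 * Iterate over both the reserved and the normal tag ranges of @tags,
 * applying @fn to each allocated request.
 */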
static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
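
/*
 * Example (sketch, not from this file): a driver-side busy_tag_iter_fn that
 * counts in-flight requests could look like the hypothetical callback below;
 * blk_mq_tagset_count_completed_rqs() below is a real in-tree user of the
 * same pattern.
 *
 *	static bool my_count_inflight(struct request *rq, void *data,
 *				      bool reserved)
 *	{
 *		unsigned int *inflight = data;
 *
 *		if (blk_mq_request_started(rq) &&
 *		    !blk_mq_request_completed(rq))
 *			(*inflight)++;
 *		return true;
 *	}
 *
 *	blk_mq_tagset_busy_iter(set, my_count_inflight, &inflight);
 */
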
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed requests'
 * completion functions have run
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues have been shut down
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it. __blk_mq_update_nr_hw_queues() uses
	 * synchronize_rcu() to ensure this function has left the critical
	 * section below.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}
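
/*
 * Allocate one sbitmap_queue; passing a shift of -1 lets sbitmap pick a
 * default shift based on the depth.
 */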
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;

free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}
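
/*
 * Allocate and initialize a tag map with @total_tags tags, the first
 * @reserved_tags of which are set aside for BLK_MQ_REQ_RESERVED allocations.
 */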
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}
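
/*
 * Resize the tag depth of @tagsptr to @tdepth. Shrinking only resizes the
 * normal tag bitmap; growing past the original depth requires @can_grow and
 * allocates a whole new rq map before freeing the old one.
 */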
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);