block/blk-mq-tag.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Tag allocation using scalable bitmaps. Uses active queue tracking to support
   * fairer distribution of tags between multiple submitters when a shared tag map
   * is used.
   *
   * Copyright (C) 2013-2014 Jens Axboe
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  
  #include <linux/blk-mq.h>
  #include <linux/delay.h>
  #include "blk.h"
  #include "blk-mq.h"
  #include "blk-mq-tag.h"
  bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
  {
  	if (!tags)
  		return true;
  	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
  }
  
  /*
   * If a previously inactive queue goes active, bump the active user count.
   * We need to do this before trying to allocate a driver tag, so that even
   * if the allocation fails the first time, the other shared-tag users can
   * reserve budget for it.
   */
  bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
  {
  	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
  	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  		atomic_inc(&hctx->tags->active_queues);
  
  	return true;
  }
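
  /*
   * Callers normally reach this through the blk_mq_tag_busy() wrapper in
   * blk-mq-tag.h, which returns early unless BLK_MQ_F_TAG_SHARED is set,
   * so the accounting above only ever runs for shared tag maps.
   */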
  
  /*
   * Wake up all waiters potentially sleeping on tags
   */
  void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
  {
  	sbitmap_queue_wake_all(&tags->bitmap_tags);
  	if (include_reserve)
  		sbitmap_queue_wake_all(&tags->breserved_tags);
  }
  
  /*
   * If a previously busy queue goes inactive, potential waiters could now
   * be allowed to queue. Wake them up and check.
   */
  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
  {
  	struct blk_mq_tags *tags = hctx->tags;
  
  	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  		return;
  
  	atomic_dec(&tags->active_queues);
  	blk_mq_tag_wakeup_all(tags, false);
  }
  
  /*
   * For shared tag users, we track the number of currently active users
   * and attempt to provide a fair share of the tag depth for each of them.
   */
  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
  				  struct sbitmap_queue *bt)
  {
  	unsigned int depth, users;
  
  	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
  		return true;
  	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  		return true;
  
  	/*
  	 * Don't try dividing an ant: a depth of 1 can't be split fairly
  	 */
  	if (bt->sb.depth == 1)
  		return true;
  
  	users = atomic_read(&hctx->tags->active_queues);
  	if (!users)
  		return true;
  
  	/*
  	 * Allow each queue at least a few tags (4), so none is starved
  	 */
  	depth = max((bt->sb.depth + users - 1) / users, 4U);
  	return atomic_read(&hctx->nr_active) < depth;
  }
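
  /*
   * Illustrative arithmetic (not from the source): with bt->sb.depth == 128
   * and users == 3, each active queue may use up to max((128 + 2) / 3, 4) =
   * 43 tags; with depth == 8 and users == 4, the 4U floor applies and each
   * queue may still use up to 4 tags.
   */
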
  static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
  			    struct sbitmap_queue *bt)
  {
  	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
  	    !hctx_may_queue(data->hctx, bt))
  		return -1;
  	if (data->shallow_depth)
  		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
  	else
  		return __sbitmap_queue_get(bt);
  }
  unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
  {
  	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
  	struct sbitmap_queue *bt;
  	struct sbq_wait_state *ws;
  	DEFINE_SBQ_WAIT(wait);
  	unsigned int tag_offset;
  	int tag;
  	if (data->flags & BLK_MQ_REQ_RESERVED) {
  		if (unlikely(!tags->nr_reserved_tags)) {
  			WARN_ON_ONCE(1);
  			return BLK_MQ_TAG_FAIL;
  		}
  		bt = &tags->breserved_tags;
  		tag_offset = 0;
  	} else {
  		bt = &tags->bitmap_tags;
  		tag_offset = tags->nr_reserved_tags;
  	}
  	tag = __blk_mq_get_tag(data, bt);
  	if (tag != -1)
  		goto found_tag;

  	if (data->flags & BLK_MQ_REQ_NOWAIT)
  		return BLK_MQ_TAG_FAIL;

  	ws = bt_wait_ptr(bt, data->hctx);
  	do {
  		struct sbitmap_queue *bt_prev;
  		/*
  		 * We're out of tags on this hardware queue; kick any
  		 * pending IO submits before going to sleep waiting for
  		 * some to complete.
  		 */
  		blk_mq_run_hw_queue(data->hctx, false);

  		/*
  		 * Retry tag allocation after running the hardware queue,
  		 * as running the queue may also have found completions.
  		 */
  		tag = __blk_mq_get_tag(data, bt);
  		if (tag != -1)
  			break;
  		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
  
  		tag = __blk_mq_get_tag(data, bt);
  		if (tag != -1)
  			break;
  		bt_prev = bt;
  		io_schedule();

  		sbitmap_finish_wait(bt, ws, &wait);
  		data->ctx = blk_mq_get_ctx(data->q);
  		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
  						data->ctx);
  		tags = blk_mq_tags_from_data(data);
  		if (data->flags & BLK_MQ_REQ_RESERVED)
  			bt = &tags->breserved_tags;
  		else
  			bt = &tags->bitmap_tags;
  		/*
  		 * If the destination hw queue has changed, issue a fake
  		 * wakeup on the previous queue to compensate for the missed
  		 * wakeup, so that other allocations on the previous queue
  		 * won't be starved.
  		 */
  		if (bt != bt_prev)
  			sbitmap_queue_wake_up(bt_prev);
  		ws = bt_wait_ptr(bt, data->hctx);
  	} while (1);
  	sbitmap_finish_wait(bt, ws, &wait);

  found_tag:
  	return tag + tag_offset;
  }
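
  /*
   * The loop above is the usual prepare-to-wait pattern: the allocation is
   * retried after sbitmap_prepare_to_wait() so that a wakeup racing with
   * queueing the waiter cannot be lost before io_schedule() runs.
   */
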
  void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
  		    struct blk_mq_ctx *ctx, unsigned int tag)
  {
  	if (!blk_mq_tag_is_reserved(tags, tag)) {
  		const int real_tag = tag - tags->nr_reserved_tags;
  		BUG_ON(real_tag >= tags->nr_tags);
  		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
  	} else {
  		BUG_ON(tag >= tags->nr_reserved_tags);
  		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
  	}
  }
  struct bt_iter_data {
  	struct blk_mq_hw_ctx *hctx;
  	busy_iter_fn *fn;
  	void *data;
  	bool reserved;
  };
  
  static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
  {
  	struct bt_iter_data *iter_data = data;
  	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
  	struct blk_mq_tags *tags = hctx->tags;
  	bool reserved = iter_data->reserved;
  	struct request *rq;

  	if (!reserved)
  		bitnr += tags->nr_reserved_tags;
  	rq = tags->rqs[bitnr];

  	/*
  	 * We can hit rq == NULL here, because the tagging functions
  	 * test and set the bit before assigning ->rqs[].
  	 */
  	if (rq && rq->q == hctx->queue)
  		return iter_data->fn(hctx, rq, iter_data->data, reserved);
  	return true;
  }

  /**
   * bt_for_each - iterate over the requests associated with a hardware queue
   * @hctx:	Hardware queue to examine.
   * @bt:		sbitmap to examine. This is either the breserved_tags member
   *		or the bitmap_tags member of struct blk_mq_tags.
   * @fn:		Pointer to the function that will be called for each request
   *		associated with @hctx that has been assigned a driver tag.
   *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
   *		where rq is a pointer to a request. Return true to continue
   *		iterating tags, false to stop.
   * @data:	Will be passed as third argument to @fn.
   * @reserved:	Indicates whether @bt is the breserved_tags member or the
   *		bitmap_tags member of struct blk_mq_tags.
   */
  static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
  			busy_iter_fn *fn, void *data, bool reserved)
  {
  	struct bt_iter_data iter_data = {
  		.hctx = hctx,
  		.fn = fn,
  		.data = data,
  		.reserved = reserved,
  	};
  
  	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
  }
  struct bt_tags_iter_data {
  	struct blk_mq_tags *tags;
  	busy_tag_iter_fn *fn;
  	void *data;
  	bool reserved;
  };
  
  static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
  {
  	struct bt_tags_iter_data *iter_data = data;
  	struct blk_mq_tags *tags = iter_data->tags;
  	bool reserved = iter_data->reserved;
  	struct request *rq;

  	if (!reserved)
  		bitnr += tags->nr_reserved_tags;
  
  	/*
  	 * We can hit rq == NULL here, because the tagging functions
  	 * test and set the bit before assigning ->rqs[].
  	 */
  	rq = tags->rqs[bitnr];
  	if (rq && blk_mq_request_started(rq))
  		return iter_data->fn(rq, iter_data->data, reserved);

  	return true;
  }
  /**
   * bt_tags_for_each - iterate over the requests in a tag map
   * @tags:	Tag map to iterate over.
   * @bt:		sbitmap to examine. This is either the breserved_tags member
   *		or the bitmap_tags member of struct blk_mq_tags.
   * @fn:		Pointer to the function that will be called for each started
   *		request. @fn will be called as follows: @fn(rq, @data,
   *		@reserved) where rq is a pointer to a request. Return true
   *		to continue iterating tags, false to stop.
   * @data:	Will be passed as second argument to @fn.
   * @reserved:	Indicates whether @bt is the breserved_tags member or the
   *		bitmap_tags member of struct blk_mq_tags.
   */
  static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
  			     busy_tag_iter_fn *fn, void *data, bool reserved)
  {
  	struct bt_tags_iter_data iter_data = {
  		.tags = tags,
  		.fn = fn,
  		.data = data,
  		.reserved = reserved,
  	};
  
  	if (tags->rqs)
  		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
  }
  /**
   * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
   * @tags:	Tag map to iterate over.
   * @fn:		Pointer to the function that will be called for each started
   *		request. @fn will be called as follows: @fn(rq, @priv,
   *		reserved) where rq is a pointer to a request. 'reserved'
   *		indicates whether or not @rq is a reserved request. Return
   *		true to continue iterating tags, false to stop.
   * @priv:	Will be passed as second argument to @fn.
   */
  static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
  		busy_tag_iter_fn *fn, void *priv)
  {
  	if (tags->nr_reserved_tags)
  		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
  	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
  }

  /**
   * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
   * @tagset:	Tag set to iterate over.
   * @fn:		Pointer to the function that will be called for each started
   *		request. @fn will be called as follows: @fn(rq, @priv,
   *		reserved) where rq is a pointer to a request. 'reserved'
   *		indicates whether or not @rq is a reserved request. Return
   *		true to continue iterating tags, false to stop.
   * @priv:	Will be passed as second argument to @fn.
   */
  void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
  		busy_tag_iter_fn *fn, void *priv)
  {
  	int i;
  
  	for (i = 0; i < tagset->nr_hw_queues; i++) {
  		if (tagset->tags && tagset->tags[i])
  			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
  	}
  }
  EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
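
  /*
   * Illustrative use (a sketch, not part of this file): a driver teardown
   * path could complete every started request with a callback matching
   * busy_tag_iter_fn; the callback name here is hypothetical:
   *
   *	static bool cancel_rq(struct request *rq, void *data, bool reserved)
   *	{
   *		blk_mq_complete_request(rq);
   *		return true;
   *	}
   *
   *	blk_mq_tagset_busy_iter(set, cancel_rq, NULL);
   */
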
  static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
  		void *data, bool reserved)
  {
  	unsigned *count = data;
  
  	if (blk_mq_request_completed(rq))
  		(*count)++;
  	return true;
  }
  
  /**
   * blk_mq_tagset_wait_completed_request - wait until all completed requests'
   * completion function has run
   * @tagset:	Tag set to drain completed requests from
   *
   * Note: This function has to be run after all IO queues are shut down
   */
  void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
  {
  	while (true) {
  		unsigned count = 0;
  
  		blk_mq_tagset_busy_iter(tagset,
  				blk_mq_tagset_count_completed_rqs, &count);
  		if (!count)
  			break;
  		msleep(5);
  	}
  }
  EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
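
  /*
   * The drain loop above simply polls: each pass counts requests whose state
   * is still MQ_RQ_COMPLETE (per blk_mq_request_completed()) and exits once
   * no such request remains, sleeping 5 ms between passes.
   */
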
  /**
   * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
   * @q:		Request queue to examine.
   * @fn:		Pointer to the function that will be called for each request
   *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
   *		reserved) where rq is a pointer to a request and hctx points
   *		to the hardware queue associated with the request. 'reserved'
   *		indicates whether or not @rq is a reserved request.
   * @priv:	Will be passed as third argument to @fn.
   *
   * Note: if @q->tag_set is shared with other request queues then @fn will be
   * called for all requests on all queues that share that tag set and not only
   * for requests associated with @q.
   */
  void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
  		void *priv)
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i;
  	/*
  	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
  	 * while the queue is frozen. So we can use q_usage_counter to avoid
  	 * racing with it. __blk_mq_update_nr_hw_queues() uses
  	 * synchronize_rcu() to ensure this function has left the critical
  	 * section below.
  	 */
  	if (!percpu_ref_tryget(&q->q_usage_counter))
  		return;
  
  	queue_for_each_hw_ctx(q, hctx, i) {
  		struct blk_mq_tags *tags = hctx->tags;
  
  		/*
  		 * If no software queues are currently mapped to this
  		 * hardware queue, there's nothing to check
  		 */
  		if (!blk_mq_hw_queue_mapped(hctx))
  			continue;
  
  		if (tags->nr_reserved_tags)
  			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
  		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
  	}
  	blk_queue_exit(q);
  }
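
  /*
   * Illustrative caller (a sketch): in-flight accounting can be built on
   * this iterator with a counting callback; count_rq here is hypothetical:
   *
   *	static bool count_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
   *			     void *data, bool reserved)
   *	{
   *		unsigned int *in_flight = data;
   *
   *		(*in_flight)++;
   *		return true;
   *	}
   *
   *	unsigned int in_flight = 0;
   *	blk_mq_queue_tag_busy_iter(q, count_rq, &in_flight);
   */
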
  static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
  		    bool round_robin, int node)
  {
  	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
  				       node);
  }
  
  static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
  						   int node, int alloc_policy)
  {
  	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
  	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

  	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
  		goto free_tags;
  	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
  		     node))
  		goto free_bitmap_tags;
  
  	return tags;
  free_bitmap_tags:
  	sbitmap_queue_free(&tags->bitmap_tags);
  free_tags:
  	kfree(tags);
  	return NULL;
  }
  struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
  				     unsigned int reserved_tags,
  				     int node, int alloc_policy)
  {
  	struct blk_mq_tags *tags;
  
  	if (total_tags > BLK_MQ_TAG_MAX) {
  		pr_err("blk-mq: tag depth too large\n");
  		return NULL;
  	}
  
  	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
  	if (!tags)
  		return NULL;
  	tags->nr_tags = total_tags;
  	tags->nr_reserved_tags = reserved_tags;

  	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
  }
  
  void blk_mq_free_tags(struct blk_mq_tags *tags)
  {
  	sbitmap_queue_free(&tags->bitmap_tags);
  	sbitmap_queue_free(&tags->breserved_tags);
  	kfree(tags);
  }
  int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
  			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
  			    bool can_grow)
  {
  	struct blk_mq_tags *tags = *tagsptr;
  
  	if (tdepth <= tags->nr_reserved_tags)
  		return -EINVAL;
  
  	/*
  	 * If we are allowed to grow beyond the original size, allocate
  	 * a new set of tags before freeing the old one.
  	 */
  	if (tdepth > tags->nr_tags) {
  		struct blk_mq_tag_set *set = hctx->queue->tag_set;
  		struct blk_mq_tags *new;
  		bool ret;
  
  		if (!can_grow)
  			return -EINVAL;
  
  		/*
  		 * We need some sort of upper limit, set it high enough that
  		 * no valid use cases should require more.
  		 */
  		if (tdepth > 16 * BLKDEV_MAX_RQ)
  			return -EINVAL;
  		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
  				tags->nr_reserved_tags);
  		if (!new)
  			return -ENOMEM;
  		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
  		if (ret) {
  			blk_mq_free_rq_map(new);
  			return -ENOMEM;
  		}
  
  		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
  		blk_mq_free_rq_map(*tagsptr);
  		*tagsptr = new;
  	} else {
  		/*
  		 * We don't need to (and can't) update reserved tags here;
  		 * they remain static and should never need resizing.
  		 */
  		sbitmap_queue_resize(&tags->bitmap_tags,
  				tdepth - tags->nr_reserved_tags);
  	}

  	return 0;
  }
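
  /*
   * Design note: shrinking, or growing within the originally allocated
   * nr_tags, only resizes the bitmap via sbitmap_queue_resize(); growing
   * past nr_tags must build a whole new rq map above, because the static
   * requests backing the tags were sized when the map was first allocated.
   */
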
  /**
   * blk_mq_unique_tag() - return a tag that is unique queue-wide
   * @rq: request for which to compute a unique tag
   *
   * The tag field in struct request is unique per hardware queue but not over
   * all hardware queues. Hence this function, which returns a tag with the
   * hardware context index in the upper bits and the per hardware queue tag in
   * the lower bits.
   *
   * Note: When called for a request that is queued on a non-multiqueue request
   * queue, the hardware context index is set to zero.
   */
  u32 blk_mq_unique_tag(struct request *rq)
  {
  	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
  		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
  }
  EXPORT_SYMBOL(blk_mq_unique_tag);
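
  /*
   * The encoding can be unwound with the blk_mq_unique_tag_to_hwq() and
   * blk_mq_unique_tag_to_tag() helpers in blk-mq.h, e.g.:
   *
   *	u32 unique = blk_mq_unique_tag(rq);
   *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
   *	u16 tag = blk_mq_unique_tag_to_tag(unique);
   */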