block/blk-mq-tag.c

  /*
   * Tag allocation using scalable bitmaps. Uses active queue tracking to support
   * fairer distribution of tags between multiple submitters when a shared tag map
   * is used.
   *
   * Copyright (C) 2013-2014 Jens Axboe
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  
  #include <linux/blk-mq.h>
  #include "blk.h"
  #include "blk-mq.h"
  #include "blk-mq-tag.h"
  bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
  {
  	if (!tags)
  		return true;
  	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
  }
  
  /*
   * If a previously inactive queue goes active, bump the active user count.
   */
  bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
  {
  	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
  	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  		atomic_inc(&hctx->tags->active_queues);
  
  	return true;
  }
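
  /*
   * The active_queues count maintained here (and dropped again in
   * __blk_mq_tag_idle()) is what hctx_may_queue() uses to divide a shared
   * tag map into roughly equal per-queue shares.
   */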
  
  /*
   * Wake up all waiters potentially sleeping on tags
   */
  void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
  {
  	sbitmap_queue_wake_all(&tags->bitmap_tags);
  	if (include_reserve)
  		sbitmap_queue_wake_all(&tags->breserved_tags);
  }
  
  /*
   * If a previously busy queue goes inactive, potential waiters could now
   * be allowed to queue. Wake them up and check.
   */
  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
  {
  	struct blk_mq_tags *tags = hctx->tags;
  
  	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  		return;
  
  	atomic_dec(&tags->active_queues);
  	blk_mq_tag_wakeup_all(tags, false);
  }
  
  /*
   * For shared tag users, we track the number of currently active users
   * and attempt to provide a fair share of the tag depth for each of them.
   */
  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
  				  struct sbitmap_queue *bt)
  {
  	unsigned int depth, users;
  
  	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
  		return true;
  	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
  		return true;
  
  	/*
  	 * Don't try dividing an ant
  	 */
  	if (bt->sb.depth == 1)
  		return true;
  
  	users = atomic_read(&hctx->tags->active_queues);
  	if (!users)
  		return true;
  
  	/*
  	 * Allow at least some tags
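  	 *
  	 * For illustration (example numbers, not from the source): a shared
  	 * map of depth 128 with three active queues allows each queue
  	 * max(DIV_ROUND_UP(128, 3), 4) = max(43, 4) = 43 tags.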
  	 */
  	depth = max((bt->sb.depth + users - 1) / users, 4U);
  	return atomic_read(&hctx->nr_active) < depth;
  }
  static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
  			    struct sbitmap_queue *bt)
  {
  	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
  	    !hctx_may_queue(data->hctx, bt))
  		return -1;
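
  	/*
  	 * A non-zero shallow_depth (set, for example, by an I/O scheduler
  	 * such as Kyber to throttle async requests) restricts how much of
  	 * each sbitmap word this allocation may use.
  	 */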
  	if (data->shallow_depth)
  		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
  	else
  		return __sbitmap_queue_get(bt);
  }
  unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
  {
  	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
  	struct sbitmap_queue *bt;
  	struct sbq_wait_state *ws;
  	DEFINE_WAIT(wait);
  	unsigned int tag_offset;
  	bool drop_ctx;
  	int tag;
  	if (data->flags & BLK_MQ_REQ_RESERVED) {
  		if (unlikely(!tags->nr_reserved_tags)) {
  			WARN_ON_ONCE(1);
  			return BLK_MQ_TAG_FAIL;
  		}
  		bt = &tags->breserved_tags;
  		tag_offset = 0;
  	} else {
  		bt = &tags->bitmap_tags;
  		tag_offset = tags->nr_reserved_tags;
  	}
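
  	/*
  	 * Both bitmaps hand out zero-based bits; tag_offset re-bases the
  	 * result so reserved tags occupy [0, nr_reserved_tags) and regular
  	 * tags [nr_reserved_tags, nr_tags) in the combined tag space (see
  	 * the found_tag label below).
  	 */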
  	tag = __blk_mq_get_tag(data, bt);
  	if (tag != -1)
  		goto found_tag;

  	if (data->flags & BLK_MQ_REQ_NOWAIT)
  		return BLK_MQ_TAG_FAIL;

  	ws = bt_wait_ptr(bt, data->hctx);
  	drop_ctx = data->ctx == NULL;
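
  	/*
  	 * Slow path: no tag was immediately available. Loop until one frees
  	 * up, re-running the hardware queue to kick pending I/O, and
  	 * re-resolve ctx/hctx after every sleep since the task may wake on
  	 * a CPU that maps to a different hardware queue.
  	 */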
  	do {
  		struct sbitmap_queue *bt_prev;
  		/*
  		 * We're out of tags on this hardware queue, kick any
  		 * pending IO submits before going to sleep waiting for
  		 * some to complete.
  		 */
  		blk_mq_run_hw_queue(data->hctx, false);

  		/*
  		 * Retry tag allocation after running the hardware queue,
  		 * as running the queue may also have found completions.
  		 */
  		tag = __blk_mq_get_tag(data, bt);
  		if (tag != -1)
  			break;
  		prepare_to_wait_exclusive(&ws->wait, &wait,
  						TASK_UNINTERRUPTIBLE);
  
  		tag = __blk_mq_get_tag(data, bt);
  		if (tag != -1)
  			break;
  		if (data->ctx)
  			blk_mq_put_ctx(data->ctx);

  		bt_prev = bt;
  		io_schedule();
  
  		data->ctx = blk_mq_get_ctx(data->q);
  		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
  		tags = blk_mq_tags_from_data(data);
  		if (data->flags & BLK_MQ_REQ_RESERVED)
  			bt = &tags->breserved_tags;
  		else
  			bt = &tags->bitmap_tags;
  		finish_wait(&ws->wait, &wait);
  
  		/*
  		 * If destination hw queue is changed, fake wake up on
  		 * previous queue for compensating the wake up miss, so
  		 * other allocations on previous queue won't be starved.
  		 */
  		if (bt != bt_prev)
  			sbitmap_queue_wake_up(bt_prev);
  		ws = bt_wait_ptr(bt, data->hctx);
  	} while (1);
  	if (drop_ctx && data->ctx)
  		blk_mq_put_ctx(data->ctx);
  	finish_wait(&ws->wait, &wait);

  found_tag:
  	return tag + tag_offset;
  }
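
  /*
   * The ctx->cpu value passed to sbitmap_queue_clear() below is a hint: the
   * freed bit is recorded per CPU (unless the map is in round-robin mode) so
   * that later allocations from the same CPU tend to reuse cache-hot tags.
   */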
  void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
  		    struct blk_mq_ctx *ctx, unsigned int tag)
  {
  	if (!blk_mq_tag_is_reserved(tags, tag)) {
  		const int real_tag = tag - tags->nr_reserved_tags;
  		BUG_ON(real_tag >= tags->nr_tags);
  		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
  	} else {
  		BUG_ON(tag >= tags->nr_reserved_tags);
  		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
  	}
  }
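
  /*
   * Iteration helpers: sbitmap_for_each_set() passes a single void *data
   * cookie to its callback, so bt_iter_data bundles everything bt_iter()
   * needs to map a set bit back to its request and invoke the caller's
   * busy_iter_fn on it.
   */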
201
  struct bt_iter_data {
  	struct blk_mq_hw_ctx *hctx;
  	busy_iter_fn *fn;
  	void *data;
  	bool reserved;
  };
  
  static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
  {
  	struct bt_iter_data *iter_data = data;
  	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
  	struct blk_mq_tags *tags = hctx->tags;
  	bool reserved = iter_data->reserved;
  	struct request *rq;

  	if (!reserved)
  		bitnr += tags->nr_reserved_tags;
  	rq = tags->rqs[bitnr];

  	/*
  	 * We can hit rq == NULL here, because the tagging functions
  	 * test and set the bit before assigning ->rqs[].
  	 */
  	if (rq && rq->q == hctx->queue)
  		iter_data->fn(hctx, rq, iter_data->data, reserved);
  	return true;
  }

  static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
  			busy_iter_fn *fn, void *data, bool reserved)
  {
  	struct bt_iter_data iter_data = {
  		.hctx = hctx,
  		.fn = fn,
  		.data = data,
  		.reserved = reserved,
  	};
  
  	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
  }
  struct bt_tags_iter_data {
  	struct blk_mq_tags *tags;
  	busy_tag_iter_fn *fn;
  	void *data;
  	bool reserved;
  };
  
  static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
  {
  	struct bt_tags_iter_data *iter_data = data;
  	struct blk_mq_tags *tags = iter_data->tags;
  	bool reserved = iter_data->reserved;
  	struct request *rq;

  	if (!reserved)
  		bitnr += tags->nr_reserved_tags;
  
  	/*
  	 * We can hit rq == NULL here, because the tagging functions
  	 * test and set the bit before assigning ->rqs[].
  	 */
  	rq = tags->rqs[bitnr];
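  	/*
  	 * Only requests in the MQ_RQ_IN_FLIGHT state (started but not yet
  	 * completed) are handed to the callback; idle entries are skipped.
  	 */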
  	if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
  		iter_data->fn(rq, iter_data->data, reserved);

  	return true;
  }
  
  static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
  			     busy_tag_iter_fn *fn, void *data, bool reserved)
  {
  	struct bt_tags_iter_data iter_data = {
  		.tags = tags,
  		.fn = fn,
  		.data = data,
  		.reserved = reserved,
  	};
  
  	if (tags->rqs)
  		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
  }
  static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
  		busy_tag_iter_fn *fn, void *priv)
  {
  	if (tags->nr_reserved_tags)
  		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
  	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
  }

  void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
  		busy_tag_iter_fn *fn, void *priv)
  {
  	int i;
  
  	for (i = 0; i < tagset->nr_hw_queues; i++) {
  		if (tagset->tags && tagset->tags[i])
  			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
  	}
  }
  EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
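
  /*
   * Unlike blk_mq_tagset_busy_iter() above, which visits only requests that
   * are currently in flight, blk_mq_tagset_iter() walks every request
   * preallocated in the tag set via static_rqs, whether or not it is in use.
   */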
  int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
  			 int (fn)(void *, struct request *))
  {
  	int i, j, ret = 0;

  	if (WARN_ON_ONCE(!fn))
  		goto out;
  
  	for (i = 0; i < set->nr_hw_queues; i++) {
  		struct blk_mq_tags *tags = set->tags[i];
  		if (!tags)
  			continue;
  		for (j = 0; j < tags->nr_tags; j++) {
  			if (!tags->static_rqs[j])
  				continue;
  			ret = fn(data, tags->static_rqs[j]);
  			if (ret)
  				goto out;
  		}
  	}
  
  out:
  	return ret;
  }
  EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
  void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
  		void *priv)
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i;

  	queue_for_each_hw_ctx(q, hctx, i) {
  		struct blk_mq_tags *tags = hctx->tags;
  
  		/*
  		 * If no software queues are currently mapped to this
  		 * hardware queue, there's nothing to check
  		 */
  		if (!blk_mq_hw_queue_mapped(hctx))
  			continue;
  
  		if (tags->nr_reserved_tags)
  			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
  		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
  	}
  }
  static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
  		    bool round_robin, int node)
  {
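  	/*
  	 * The -1 shift lets sbitmap pick a suitable bits-per-word split for
  	 * this depth; round_robin makes tag reuse cycle through the map in
  	 * order instead of preferring the per-CPU allocation hint.
  	 */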
  	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
  				       node);
  }
  
  static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
  						   int node, int alloc_policy)
  {
  	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
  	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

  	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
  		goto free_tags;
  	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
  		     node))
  		goto free_bitmap_tags;
  
  	return tags;
  free_bitmap_tags:
  	sbitmap_queue_free(&tags->bitmap_tags);
  free_tags:
  	kfree(tags);
  	return NULL;
  }
  struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
  				     unsigned int reserved_tags,
  				     int node, int alloc_policy)
  {
  	struct blk_mq_tags *tags;
  
  	if (total_tags > BLK_MQ_TAG_MAX) {
  		pr_err("blk-mq: tag depth too large
  ");
  		return NULL;
  	}
  
  	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
  	if (!tags)
  		return NULL;
  	tags->nr_tags = total_tags;
  	tags->nr_reserved_tags = reserved_tags;

  	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
  }
  
  void blk_mq_free_tags(struct blk_mq_tags *tags)
  {
  	sbitmap_queue_free(&tags->bitmap_tags);
  	sbitmap_queue_free(&tags->breserved_tags);
  	kfree(tags);
  }
  int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
  			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
  			    bool can_grow)
  {
  	struct blk_mq_tags *tags = *tagsptr;
  
  	if (tdepth <= tags->nr_reserved_tags)
  		return -EINVAL;
  	tdepth -= tags->nr_reserved_tags;
  	/*
  	 * If we are allowed to grow beyond the original size, allocate
  	 * a new set of tags before freeing the old one.
  	 */
  	if (tdepth > tags->nr_tags) {
  		struct blk_mq_tag_set *set = hctx->queue->tag_set;
  		struct blk_mq_tags *new;
  		bool ret;
  
  		if (!can_grow)
  			return -EINVAL;
  
  		/*
  		 * We need some sort of upper limit, set it high enough that
  		 * no valid use cases should require more.
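  		 * At the time of writing BLKDEV_MAX_RQ is 128, so the check
  		 * below caps the new depth at 16 * 128 = 2048 tags per
  		 * hardware queue.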
  		 */
  		if (tdepth > 16 * BLKDEV_MAX_RQ)
  			return -EINVAL;
  
  		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
  		if (!new)
  			return -ENOMEM;
  		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
  		if (ret) {
  			blk_mq_free_rq_map(new);
  			return -ENOMEM;
  		}
  
  		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
  		blk_mq_free_rq_map(*tagsptr);
  		*tagsptr = new;
  	} else {
  		/*
  		 * Don't need (or can't) update reserved tags here, they
  		 * remain static and should never need resizing.
  		 */
  		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
  	}

  	return 0;
  }
  /**
   * blk_mq_unique_tag() - return a tag that is unique queue-wide
   * @rq: request for which to compute a unique tag
   *
   * The tag field in struct request is unique per hardware queue but not over
   * all hardware queues. Hence this function returns a tag with the
   * hardware context index in the upper bits and the per hardware queue tag in
   * the lower bits.
   *
   * Note: When called for a request that is queued on a non-multiqueue request
   * queue, the hardware context index is set to zero.
   */
  u32 blk_mq_unique_tag(struct request *rq)
  {
  	struct request_queue *q = rq->q;
  	struct blk_mq_hw_ctx *hctx;
  	int hwq = 0;
  
  	if (q->mq_ops) {
  		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
  		hwq = hctx->queue_num;
  	}
  
  	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
  		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
  }
  EXPORT_SYMBOL(blk_mq_unique_tag);
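
  /*
   * Callers typically decode the unique tag again with the helpers from
   * <linux/blk-mq.h>; an illustrative sketch (not part of this file):
   *
   *	u32 unique = blk_mq_unique_tag(rq);
   *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
   *	u16 tag = blk_mq_unique_tag_to_tag(unique);
   */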