block/blk-mq-tag.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if we fail to get a tag the first time, the other shared-tag users can
 * still reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
                struct request_queue *q = hctx->queue;
                struct blk_mq_tag_set *set = q->tag_set;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
                    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        atomic_inc(&set->active_queues_shared_sbitmap);
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
                    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        atomic_inc(&hctx->tags->active_queues);
        }

        return true;
}

/*
 * Wake up all waiters potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;
        struct request_queue *q = hctx->queue;
        struct blk_mq_tag_set *set = q->tag_set;

        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
                if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
                                        &q->queue_flags))
                        return;
                atomic_dec(&set->active_queues_shared_sbitmap);
        } else {
                if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
                atomic_dec(&tags->active_queues);
        }

        blk_mq_tag_wakeup_all(tags, false);
}
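
/*
 * One non-blocking allocation attempt from @bt. Fails with BLK_MQ_NO_TAG
 * when fairness accounting (hctx_may_queue()) says this hctx has used up
 * its share of a shared tag map, or when the sbitmap has no bit free.
 */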
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
            !hctx_may_queue(data->hctx, bt))
                return BLK_MQ_NO_TAG;

        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}
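
/*
 * Slow-path tag allocation: try once, and if the map is exhausted (and
 * BLK_MQ_REQ_NOWAIT is not set) kick the hardware queue to push out pending
 * IO, then sleep on the sbitmap waitqueue. After each sleep the ctx/hctx
 * mapping is re-resolved, since the task may have been migrated to another
 * CPU while it slept.
 */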
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_NO_TAG;
                }
                bt = tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != BLK_MQ_NO_TAG)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_NO_TAG;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                              data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = tags->breserved_tags;
                else
                        bt = tags->bitmap_tags;

                /*
                 * If the destination hw queue changed, issue a fake wakeup
                 * on the previous queue to compensate for the missed wakeup,
                 * so other allocations on the previous queue won't starve.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        /*
         * Give up this allocation if the hctx is inactive. The caller will
         * retry on an active hctx.
         */
        if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
                blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
                return BLK_MQ_NO_TAG;
        }
        return tag + tag_offset;
}
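
/*
 * Release a tag back to the bitmap it came from. Reserved tags are cleared
 * at their raw value in breserved_tags; normal tags have the
 * nr_reserved_tags offset removed first.
 */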
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                    unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
        }
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};
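
/*
 * Look up the request currently holding tag @bitnr and take a reference on
 * it. Done under tags->lock so the lookup and refcount_inc_not_zero() cannot
 * race with the request being freed and the tag reused.
 */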
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
                unsigned int bitnr)
{
        struct request *rq;
        unsigned long flags;

        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
        if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;
        bool ret = true;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (rq->q == hctx->queue && rq->mq_hctx == hctx)
                ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
        blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:       Hardware queue to examine.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each request
 *              associated with @hctx that has been assigned a driver tag.
 *              @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *              where rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @data:       Will be passed as third argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        unsigned int flags;
};

#define BT_TAG_ITER_RESERVED            (1 << 0)
#define BT_TAG_ITER_STARTED             (1 << 1)
#define BT_TAG_ITER_STATIC_RQS          (1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
        struct request *rq;
        bool ret = true;
        bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (iter_static_rqs)
                rq = tags->static_rqs[bitnr];
        else
                rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
            blk_mq_request_started(rq))
                ret = iter_data->fn(rq, iter_data->data, reserved);
        if (!iter_static_rqs)
                blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:       Tag map to iterate over.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @data,
 *              @reserved) where rq is a pointer to a request. Return true
 *              to continue iterating tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @flags:      BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data,
                             unsigned int flags)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .flags = flags,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
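
/*
 * Walk both the reserved and the normal bitmap of @tags with the given
 * BT_TAG_ITER_* flags, marking the reserved pass so the callback can tell
 * the two apart.
 */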
static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
        WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
                                 flags | BT_TAG_ITER_RESERVED);
        bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:       Tag map to iterate over.
 * @fn:         Pointer to the function that will be called for each
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv)
{
        __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:     Tag set to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        __blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
                                              BT_TAG_ITER_STARTED);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
                void *data, bool reserved)
{
        unsigned *count = data;

        if (blk_mq_request_completed(rq))
                (*count)++;
        return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:     Tag set to drain completed requests
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
        while (true) {
                unsigned count = 0;

                blk_mq_tagset_busy_iter(tagset,
                                blk_mq_tagset_count_completed_rqs, &count);
                if (!count)
                        break;
                msleep(5);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:          Request queue to examine.
 * @fn:         Pointer to the function that will be called for each request
 *              on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *              reserved) where rq is a pointer to a request and hctx points
 *              to the hardware queue associated with the request. 'reserved'
 *              indicates whether or not @rq is a reserved request.
 * @priv:       Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check.
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}
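
/*
 * Initialize a normal/reserved sbitmap pair. The reserved tags come out of
 * @queue_depth, so the normal bitmap is sized to queue_depth - reserved, and
 * a failure to allocate the second bitmap unwinds the first.
 */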
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
                        struct sbitmap_queue *breserved_tags,
                        unsigned int queue_depth, unsigned int reserved,
                        int node, int alloc_policy)
{
        unsigned int depth = queue_depth - reserved;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(bitmap_tags, depth, round_robin, node))
                return -ENOMEM;
        if (bt_alloc(breserved_tags, reserved, round_robin, node))
                goto free_bitmap_tags;

        return 0;

free_bitmap_tags:
        sbitmap_queue_free(bitmap_tags);
        return -ENOMEM;
}

static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                   int node, int alloc_policy)
{
        int ret;

        ret = blk_mq_init_bitmaps(&tags->__bitmap_tags,
                                  &tags->__breserved_tags,
                                  tags->nr_tags, tags->nr_reserved_tags,
                                  node, alloc_policy);
        if (ret)
                return ret;

        tags->bitmap_tags = &tags->__bitmap_tags;
        tags->breserved_tags = &tags->__breserved_tags;

        return 0;
}
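
/*
 * For tag sets that share one sbitmap across all hardware queues: allocate
 * the set-wide bitmap pair, then point each hctx's tags at it instead of at
 * per-hctx bitmaps.
 */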
int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set)
{
        int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
        int i, ret;

        ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags,
                                  set->queue_depth, set->reserved_tags,
                                  set->numa_node, alloc_policy);
        if (ret)
                return ret;

        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];

                tags->bitmap_tags = &set->__bitmap_tags;
                tags->breserved_tags = &set->__breserved_tags;
        }

        return 0;
}

void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
{
        sbitmap_queue_free(&set->__bitmap_tags);
        sbitmap_queue_free(&set->__breserved_tags);
}
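
/*
 * Allocate and set up a tag map. With a shared sbitmap the bitmaps are
 * attached later (see blk_mq_init_shared_sbitmap()); otherwise per-map
 * bitmaps are initialized here.
 */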
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, unsigned int flags)
{
        int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
        spin_lock_init(&tags->lock);

        if (blk_mq_is_sbitmap_shared(flags))
                return tags;

        if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
                kfree(tags);
                return NULL;
        }
        return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
        if (!blk_mq_is_sbitmap_shared(flags)) {
                sbitmap_queue_free(tags->bitmap_tags);
                sbitmap_queue_free(tags->breserved_tags);
        }
        kfree(tags);
}
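
/*
 * Resize the tag map of one hardware queue to @tdepth. Staying within the
 * original nr_tags only resizes the sbitmap; growing past it requires
 * @can_grow and swaps in a freshly allocated rq map before freeing the old
 * one.
 */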
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > MAX_SCHED_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                          tags->nr_reserved_tags, set->flags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new, set->flags);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr, set->flags);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(tags->bitmap_tags,
                                     tdepth - tags->nr_reserved_tags);
        }

        return 0;
}
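
/* Resize the set-wide shared bitmap; reserved tags keep their size. */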
void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
{
        sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq:         request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);