Blame view
block/blk-mq-tag.c
11.4 KB

/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}
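
To make the fair-share check concrete, here is a small stand-alone sketch of the same arithmetic; the numbers and the helper name are illustrative only, not part of this file.

/* Illustrative sketch of the fair-share computation in hctx_may_queue(). */
static unsigned int example_fair_depth(unsigned int sb_depth, unsigned int users)
{
        unsigned int depth = (sb_depth + users - 1) / users;    /* divide, rounding up */

        return depth < 4 ? 4 : depth;                           /* allow at least some tags */
}

/*
 * With a shared map of 256 tags and 3 active queues this yields
 * max((256 + 2) / 3, 4) = 86, so a hardware queue that already has 86
 * requests in flight (hctx->nr_active) is told to back off.
 */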

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}
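
The shallow_depth branch is what lets an I/O scheduler cap how deep certain allocations may go. Below is a minimal sketch of the kind of hook that sets it, loosely modeled on how schedulers use the field; the callback name and the cap value are assumptions, not code from this file.

/* Hypothetical elevator .limit_depth hook: throttle async allocations. */
static void example_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        if (!op_is_sync(op))
                data->shallow_depth = 16;       /* arbitrary cap for the example */
}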

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        unsigned int tag_offset;
        bool drop_ctx;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        drop_ctx = data->ctx == NULL;
        do {
                prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                if (data->ctx)
                        blk_mq_put_ctx(data->ctx);

                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                finish_wait(&ws->wait, &wait);
                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        if (drop_ctx && data->ctx)
                blk_mq_put_ctx(data->ctx);

        finish_wait(&ws->wait, &wait);

found_tag:
        return tag + tag_offset;
}
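
A worked illustration of the tag_offset bookkeeping above (numbers invented for the example): with nr_tags = 64 and nr_reserved_tags = 4, reserved allocations come from breserved_tags and map to global tags 0..3, while normal allocations come from bitmap_tags and have 4 added back, giving global tags 4..63. blk_mq_put_tag() below undoes the same offset before clearing the bit. The helper here is hypothetical, shown only to spell out that mapping.

/* Illustrative only: splitting a global tag back into (bitmap, bit). */
static void example_split_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag < tags->nr_reserved_tags)
                pr_debug("reserved bitmap, bit %u\n", tag);
        else
                pr_debug("normal bitmap, bit %u\n", tag - tags->nr_reserved_tags);
}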

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq)
                iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
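
blk_mq_tagset_busy_iter() is the exported way for a driver to visit every request currently holding a tag in its tag set. A rough sketch of a caller follows; the function names and the counting use case are hypothetical, not taken from this file.

/* Hypothetical busy_tag_iter_fn: count requests currently holding a tag. */
static void example_count_one(struct request *rq, void *data, bool reserved)
{
        unsigned int *inflight = data;

        (*inflight)++;
}

static unsigned int example_count_inflight(struct blk_mq_tag_set *set)
{
        unsigned int inflight = 0;

        blk_mq_tagset_busy_iter(set, example_count_one, &inflight);
        return inflight;
}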

int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
                         int (reinit_request)(void *, struct request *))
{
        int i, j, ret = 0;

        if (WARN_ON_ONCE(!reinit_request))
                goto out;

        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];

                if (!tags)
                        continue;

                for (j = 0; j < tags->nr_tags; j++) {
                        if (!tags->static_rqs[j])
                                continue;

                        ret = reinit_request(set->driver_data,
                                             tags->static_rqs[j]);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);
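
The reinit_request callback is invoked once per statically allocated request. A minimal sketch of what a driver might pass in is below, assuming a hypothetical per-request PDU struct example_cmd; none of these names come from this file. A driver would then call blk_mq_reinit_tagset(&dev->tag_set, example_reinit_request) from, say, its controller-reset path.

/* Hypothetical callback: reset the driver's per-request PDU after a reset. */
static int example_reinit_request(void *driver_data, struct request *rq)
{
        struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

        memset(cmd, 0, sizeof(*cmd));
        return 0;
}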

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}
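
For context, the total_tags and reserved_tags arguments above ultimately come from the driver's blk_mq_tag_set; a sketch of such a configuration is below. example_mq_ops and example_cmd are placeholders, not definitions from this file.

/* Hypothetical driver tag-set configuration feeding blk_mq_init_tags(). */
static struct blk_mq_tag_set example_tag_set = {
        .ops            = &example_mq_ops,              /* driver's blk_mq_ops */
        .nr_hw_queues   = 1,
        .queue_depth    = 128,                          /* total_tags */
        .reserved_tags  = 4,                            /* nr_reserved_tags */
        .numa_node      = NUMA_NO_NODE,
        .cmd_size       = sizeof(struct example_cmd),
        .flags          = BLK_MQ_F_SHOULD_MERGE,
};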

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                tags->nr_reserved_tags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
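
Callers recover the two halves with the blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() helpers from include/linux/blk-mq.h; a short sketch follows (the wrapper function itself is hypothetical).

/* Hypothetical consumer: split a queue-wide unique tag back apart. */
static void example_decode_unique_tag(struct request *rq)
{
        u32 unique = blk_mq_unique_tag(rq);
        u16 hwq = blk_mq_unique_tag_to_hwq(unique);     /* upper bits: hardware queue index */
        u16 tag = blk_mq_unique_tag_to_tag(unique);     /* lower bits: per hardware queue tag */

        pr_debug("hwq %u tag %u\n", hwq, tag);
}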