Blame view
block/blk-mq-tag.c
11.1 KB
75bb4625b blk-mq: add file ... |
1 |
/* |
88459642c blk-mq: abstract ... |
2 3 4 |
* Tag allocation using scalable bitmaps. Uses active queue tracking to support * fairer distribution of tags between multiple submitters when a shared tag map * is used. |
75bb4625b blk-mq: add file ... |
5 6 7 |
* * Copyright (C) 2013-2014 Jens Axboe */ |
320ae51fe blk-mq: new multi... |
8 9 |
#include <linux/kernel.h> #include <linux/module.h> |
320ae51fe blk-mq: new multi... |
10 11 12 13 14 |
#include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" |
320ae51fe blk-mq: new multi... |
15 16 |
/*
 * Return true if any tag in the normal (non-reserved) tag space is free.
 * A NULL @tags means no tag set is attached, which is treated as "free".
 */
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	/* A clear bit in the sbitmap is an unallocated tag. */
	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	/*
	 * Cheap non-atomic test first so the common already-active case
	 * avoids the atomic RMW (and dirtying the cacheline).
	 */
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wakeup all potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	/* Reserved tags are not subject to the fair-share limit. */
	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	/* hctx is NULL for reserved-tag allocation: no limiting there. */
	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags: fair share is ceil(depth / users),
	 * clamped to a minimum of 4 tags per active queue.
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
f4a644db8 sbitmap: push all... |
89 |
/*
 * Grab one tag from @bt, honoring the per-queue fair-share limit.
 * Returns the tag number, or -1 if none may be taken right now.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
	if (hctx_may_queue(hctx, bt))
		return __sbitmap_queue_get(bt);

	return -1;
}
40aabb674 sbitmap: push per... |
95 96 |
/*
 * Allocate a tag from @bt, sleeping if necessary until one becomes free.
 *
 * @data:	allocation context; ->ctx and ->hctx may be re-mapped here
 *		if the task migrates CPUs while sleeping
 * @bt:		bitmap to allocate from (normal or reserved tag space)
 * @hctx:	hardware context, or NULL for reserved-tag allocation
 * @tags:	tag set @bt belongs to
 *
 * Returns the allocated tag, or -1 if BLK_MQ_REQ_NOWAIT was set and no
 * tag was immediately available.
 */
static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
		  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	int tag;

	/* Fast path: a tag is free right now. */
	tag = __bt_get(hctx, bt);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	ws = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		/* Re-check after queueing on the waitqueue to avoid a lost wakeup. */
		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		/* Release the software context before sleeping. */
		blk_mq_put_ctx(data->ctx);

		io_schedule();

		/*
		 * We may have been moved to a different CPU while asleep;
		 * re-map ctx/hctx and pick the matching bitmap and wait
		 * queue for the next iteration.
		 */
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&ws->wait, &wait);
	return tag;
}
cb96a42cc blk-mq: fix sched... |
146 |
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data) |
4bb659b15 blk-mq: implement... |
147 148 |
{ int tag; |
cb96a42cc blk-mq: fix sched... |
149 |
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx, |
40aabb674 sbitmap: push per... |
150 |
data->hctx->tags); |
4bb659b15 blk-mq: implement... |
151 |
if (tag >= 0) |
cb96a42cc blk-mq: fix sched... |
152 |
return tag + data->hctx->tags->nr_reserved_tags; |
4bb659b15 blk-mq: implement... |
153 154 |
return BLK_MQ_TAG_FAIL; |
320ae51fe blk-mq: new multi... |
155 |
} |
cb96a42cc blk-mq: fix sched... |
156 |
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data) |
320ae51fe blk-mq: new multi... |
157 |
{ |
40aabb674 sbitmap: push per... |
158 |
int tag; |
320ae51fe blk-mq: new multi... |
159 |
|
cb96a42cc blk-mq: fix sched... |
160 |
if (unlikely(!data->hctx->tags->nr_reserved_tags)) { |
320ae51fe blk-mq: new multi... |
161 162 163 |
WARN_ON_ONCE(1); return BLK_MQ_TAG_FAIL; } |
40aabb674 sbitmap: push per... |
164 165 |
tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, data->hctx->tags); |
320ae51fe blk-mq: new multi... |
166 167 |
if (tag < 0) return BLK_MQ_TAG_FAIL; |
4bb659b15 blk-mq: implement... |
168 |
|
320ae51fe blk-mq: new multi... |
169 170 |
return tag; } |
cb96a42cc blk-mq: fix sched... |
171 |
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) |
320ae51fe blk-mq: new multi... |
172 |
{ |
6f3b0e8bc blk-mq: add a fla... |
173 174 175 |
if (data->flags & BLK_MQ_REQ_RESERVED) return __blk_mq_get_reserved_tag(data); return __blk_mq_get_tag(data); |
320ae51fe blk-mq: new multi... |
176 |
} |
40aabb674 sbitmap: push per... |
177 178 |
/*
 * Release @tag back to the tag set of @hctx. Tags >= nr_reserved_tags
 * belong to the normal pool (stored offset by nr_reserved_tags); lower
 * values belong to the reserved pool. @ctx->cpu is used as the wakeup
 * hint for the sbitmap's per-CPU allocation cache.
 */
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		/*
		 * NOTE(review): this BUG_ON can never fire -- the else
		 * branch already implies tag < nr_reserved_tags.
		 */
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}
88459642c blk-mq: abstract ... |
190 191 192 193 194 195 196 197 |
struct bt_iter_data { struct blk_mq_hw_ctx *hctx; busy_iter_fn *fn; void *data; bool reserved; }; static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) |
320ae51fe blk-mq: new multi... |
198 |
{ |
88459642c blk-mq: abstract ... |
199 200 201 202 |
struct bt_iter_data *iter_data = data; struct blk_mq_hw_ctx *hctx = iter_data->hctx; struct blk_mq_tags *tags = hctx->tags; bool reserved = iter_data->reserved; |
81481eb42 blk-mq: fix and s... |
203 |
struct request *rq; |
4bb659b15 blk-mq: implement... |
204 |
|
88459642c blk-mq: abstract ... |
205 206 207 |
if (!reserved) bitnr += tags->nr_reserved_tags; rq = tags->rqs[bitnr]; |
4bb659b15 blk-mq: implement... |
208 |
|
88459642c blk-mq: abstract ... |
209 210 211 212 |
if (rq->q == hctx->queue) iter_data->fn(hctx, rq, iter_data->data, reserved); return true; } |
4bb659b15 blk-mq: implement... |
213 |
|
88459642c blk-mq: abstract ... |
214 215 216 217 218 219 220 221 222 223 224 |
/*
 * Walk every allocated tag in @bt and call @fn on each matching request.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data;

	iter_data.hctx = hctx;
	iter_data.fn = fn;
	iter_data.data = data;
	iter_data.reserved = reserved;

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
88459642c blk-mq: abstract ... |
226 227 228 229 230 231 232 233 |
struct bt_tags_iter_data { struct blk_mq_tags *tags; busy_tag_iter_fn *fn; void *data; bool reserved; }; static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) |
f26cdc853 blk-mq: Shared ta... |
234 |
{ |
88459642c blk-mq: abstract ... |
235 236 237 |
struct bt_tags_iter_data *iter_data = data; struct blk_mq_tags *tags = iter_data->tags; bool reserved = iter_data->reserved; |
f26cdc853 blk-mq: Shared ta... |
238 |
struct request *rq; |
f26cdc853 blk-mq: Shared ta... |
239 |
|
88459642c blk-mq: abstract ... |
240 241 242 |
if (!reserved) bitnr += tags->nr_reserved_tags; rq = tags->rqs[bitnr]; |
f26cdc853 blk-mq: Shared ta... |
243 |
|
88459642c blk-mq: abstract ... |
244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 |
iter_data->fn(rq, iter_data->data, reserved); return true; } static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt, busy_tag_iter_fn *fn, void *data, bool reserved) { struct bt_tags_iter_data iter_data = { .tags = tags, .fn = fn, .data = data, .reserved = reserved, }; if (tags->rqs) sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data); |
f26cdc853 blk-mq: Shared ta... |
260 |
} |
e8f1e1630 blk-mq: Make blk_... |
261 262 |
/*
 * Iterate over every allocated tag in @tags -- reserved first (when the
 * set has any), then the normal tag space -- calling @fn on each request.
 */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	const bool has_reserved = tags->nr_reserved_tags != 0;

	if (has_reserved)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}
f26cdc853 blk-mq: Shared ta... |
268 |
|
e0489487e blk-mq: Export ta... |
269 270 271 272 273 274 275 276 277 278 279 |
/*
 * Call @fn on every allocated request of every hardware queue in @tagset.
 * Queues whose tag set has not been allocated yet are skipped.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	if (!tagset->tags)
		return;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = tagset->tags[i];

		if (tags)
			blk_mq_all_tag_busy_iter(tags, fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
486cf9899 blk-mq: Introduce... |
280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 |
/*
 * Re-run the driver's ->reinit_request() hook on every allocated request
 * of every hardware queue in @set. Stops and returns the first non-zero
 * error from the hook; returns 0 on success or when no hook is set.
 */
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
	int i, j, ret = 0;

	if (!set->ops->reinit_request)
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		/*
		 * Guard against unallocated per-queue tag sets, matching
		 * the NULL check done by blk_mq_tagset_busy_iter().
		 */
		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->rqs[j])
				continue;

			ret = set->ops->reinit_request(set->driver_data,
						tags->rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);
0bf6cd5b9 blk-mq: factor ou... |
305 |
/*
 * Call @fn on every allocated request of every mapped hardware queue
 * belonging to @q, reserved tags first.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}

}
88459642c blk-mq: abstract ... |
327 |
static unsigned int bt_unused_tags(const struct sbitmap_queue *bt) |
e3a2b3f93 blk-mq: allow cha... |
328 |
{ |
88459642c blk-mq: abstract ... |
329 |
return bt->sb.depth - sbitmap_weight(&bt->sb); |
e3a2b3f93 blk-mq: allow cha... |
330 |
} |
f4a644db8 sbitmap: push all... |
331 332 |
/*
 * Initialize one sbitmap_queue with @depth tags on NUMA node @node.
 * The -1 shift lets the sbitmap code pick its own word granularity.
 * Returns 0 on success, negative errno on allocation failure.
 */
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

/*
 * Allocate the normal and reserved bitmaps for @tags. On success returns
 * @tags; on failure frees everything INCLUDING the caller-allocated @tags
 * struct itself and returns NULL.
 */
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	/* The normal pool is whatever is left after the reserved tags. */
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;

free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}
320ae51fe blk-mq: new multi... |
357 |
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, |
24391c0dc blk-mq: add tag a... |
358 359 |
unsigned int reserved_tags, int node, int alloc_policy) |
320ae51fe blk-mq: new multi... |
360 |
{ |
320ae51fe blk-mq: new multi... |
361 |
struct blk_mq_tags *tags; |
320ae51fe blk-mq: new multi... |
362 363 364 365 366 367 368 369 370 371 |
if (total_tags > BLK_MQ_TAG_MAX) { pr_err("blk-mq: tag depth too large "); return NULL; } tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node); if (!tags) return NULL; |
320ae51fe blk-mq: new multi... |
372 373 |
tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; |
320ae51fe blk-mq: new multi... |
374 |
|
24391c0dc blk-mq: add tag a... |
375 |
return blk_mq_init_bitmap_tags(tags, node, alloc_policy); |
320ae51fe blk-mq: new multi... |
376 377 378 379 |
} void blk_mq_free_tags(struct blk_mq_tags *tags) { |
88459642c blk-mq: abstract ... |
380 381 |
sbitmap_queue_free(&tags->bitmap_tags); sbitmap_queue_free(&tags->breserved_tags); |
320ae51fe blk-mq: new multi... |
382 383 |
kfree(tags); } |
e3a2b3f93 blk-mq: allow cha... |
384 385 386 387 388 389 390 391 392 393 |
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) { tdepth -= tags->nr_reserved_tags; if (tdepth > tags->nr_tags) return -EINVAL; /* * Don't need (or can't) update reserved tags here, they remain * static and should never need resizing. */ |
88459642c blk-mq: abstract ... |
394 |
sbitmap_queue_resize(&tags->bitmap_tags, tdepth); |
aed3ea94b block: wake up wa... |
395 |
blk_mq_tag_wakeup_all(tags, false); |
e3a2b3f93 blk-mq: allow cha... |
396 397 |
return 0; } |
205fb5f5b blk-mq: add blk_m... |
398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 |
/** * blk_mq_unique_tag() - return a tag that is unique queue-wide * @rq: request for which to compute a unique tag * * The tag field in struct request is unique per hardware queue but not over * all hardware queues. Hence this function that returns a tag with the * hardware context index in the upper bits and the per hardware queue tag in * the lower bits. * * Note: When called for a request that is queued on a non-multiqueue request * queue, the hardware context index is set to zero. */ u32 blk_mq_unique_tag(struct request *rq) { struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; int hwq = 0; if (q->mq_ops) { |
7d7e0f90b blk-mq: remove ->... |
417 |
hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu); |
205fb5f5b blk-mq: add blk_m... |
418 419 420 421 422 423 424 |
hwq = hctx->queue_num; } return (hwq << BLK_MQ_UNIQUE_TAG_BITS) | (rq->tag & BLK_MQ_UNIQUE_TAG_MASK); } EXPORT_SYMBOL(blk_mq_unique_tag); |
320ae51fe blk-mq: new multi... |
425 426 427 |
/*
 * Format tag-set statistics into @page for sysfs. Returns the number of
 * bytes written, or 0 if no tag set is attached.
 */
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	/* Each sysfs line is terminated with '\n'. */
	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			1U << tags->bitmap_tags.sb.shift);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n",
			atomic_read(&tags->active_queues));

	return page - orig_page;
}