Blame view

block/blk-mq-sysfs.c 11.3 KB
320ae51fe   Jens Axboe   blk-mq: new multi...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/backing-dev.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/mm.h>
  #include <linux/init.h>
  #include <linux/slab.h>
  #include <linux/workqueue.h>
  #include <linux/smp.h>
  
  #include <linux/blk-mq.h>
  #include "blk-mq.h"
  #include "blk-mq-tag.h"
  
  static void blk_mq_sysfs_release(struct kobject *kobj)
  {
  }
  
/*
 * One sysfs attribute of a software queue (blk_mq_ctx); either callback
 * may be NULL, in which case the access returns -EIO (see the show/store
 * dispatchers below).
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
  
/* Same as blk_mq_ctx_sysfs_entry, but for a hardware queue (blk_mq_hw_ctx). */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
  
  static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
  				 char *page)
  {
  	struct blk_mq_ctx_sysfs_entry *entry;
  	struct blk_mq_ctx *ctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  	q = ctx->queue;
  
  	if (!entry->show)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->show(ctx, page);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
  				  const char *page, size_t length)
  {
  	struct blk_mq_ctx_sysfs_entry *entry;
  	struct blk_mq_ctx *ctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  	q = ctx->queue;
  
  	if (!entry->store)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->store(ctx, page, length);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
  				    struct attribute *attr, char *page)
  {
  	struct blk_mq_hw_ctx_sysfs_entry *entry;
  	struct blk_mq_hw_ctx *hctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  	q = hctx->queue;
  
  	if (!entry->show)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->show(hctx, page);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
  				     struct attribute *attr, const char *page,
  				     size_t length)
  {
  	struct blk_mq_hw_ctx_sysfs_entry *entry;
  	struct blk_mq_hw_ctx *hctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  	q = hctx->queue;
  
  	if (!entry->store)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->store(hctx, page, length);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
  {
  	return sprintf(page, "%lu %lu
  ", ctx->rq_dispatched[1],
  				ctx->rq_dispatched[0]);
  }
  
  static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
  {
  	return sprintf(page, "%lu
  ", ctx->rq_merged);
  }
  
  static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
  {
  	return sprintf(page, "%lu %lu
  ", ctx->rq_completed[1],
  				ctx->rq_completed[0]);
  }
  
  static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
  {
320ae51fe   Jens Axboe   blk-mq: new multi...
147
  	struct request *rq;
596f5aad2   Ming Lei   blk-mq: fix buffe...
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
  	int len = snprintf(page, PAGE_SIZE - 1, "%s:
  ", msg);
  
  	list_for_each_entry(rq, list, queuelist) {
  		const int rq_len = 2 * sizeof(rq) + 2;
  
  		/* if the output will be truncated */
  		if (PAGE_SIZE - 1 < len + rq_len) {
  			/* backspacing if it can't hold '\t...
  ' */
  			if (PAGE_SIZE - 1 < len + 5)
  				len -= rq_len;
  			len += snprintf(page + len, PAGE_SIZE - 1 - len,
  					"\t...
  ");
  			break;
  		}
  		len += snprintf(page + len, PAGE_SIZE - 1 - len,
  				"\t%p
  ", rq);
  	}
  
  	return len;
320ae51fe   Jens Axboe   blk-mq: new multi...
171
172
173
174
175
176
177
178
179
180
181
182
  }
  
  static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
  {
  	ssize_t ret;
  
  	spin_lock(&ctx->lock);
  	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
  	spin_unlock(&ctx->lock);
  
  	return ret;
  }
05229beed   Jens Axboe   block: add block ...
183
184
185
186
187
  static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
  	return sprintf(page, "invoked=%lu, success=%lu
  ", hctx->poll_invoked, hctx->poll_success);
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
  static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
  					   char *page)
  {
  	return sprintf(page, "%lu
  ", hctx->queued);
  }
  
  static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
  	return sprintf(page, "%lu
  ", hctx->run);
  }
  
  static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
  					       char *page)
  {
  	char *start_page = page;
  	int i;
  
  	page += sprintf(page, "%8u\t%lu
  ", 0U, hctx->dispatched[0]);
  
  	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
  		unsigned long d = 1U << (i - 1);
  
  		page += sprintf(page, "%8lu\t%lu
  ", d, hctx->dispatched[i]);
  	}
  
  	return page - start_page;
  }
  
  static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
  					    char *page)
  {
  	ssize_t ret;
  
  	spin_lock(&hctx->lock);
  	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
  	spin_unlock(&hctx->lock);
  
  	return ret;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
231
232
233
234
/* Delegate tag-map formatting for this hardware queue to the tag code. */
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}
0d2602ca3   Jens Axboe   blk-mq: improve s...
235
236
237
238
239
  static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
  	return sprintf(page, "%u
  ", atomic_read(&hctx->nr_active));
  }
676141e48   Jens Axboe   blk-mq: don't dum...
240
241
  static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
cb2da43e3   Jens Axboe   blk-mq: simplify ...
242
  	unsigned int i, first = 1;
676141e48   Jens Axboe   blk-mq: don't dum...
243
  	ssize_t ret = 0;
cb2da43e3   Jens Axboe   blk-mq: simplify ...
244
  	for_each_cpu(i, hctx->cpumask) {
676141e48   Jens Axboe   blk-mq: don't dum...
245
246
247
248
249
250
251
  		if (first)
  			ret += sprintf(ret + page, "%u", i);
  		else
  			ret += sprintf(ret + page, ", %u", i);
  
  		first = 0;
  	}
676141e48   Jens Axboe   blk-mq: don't dum...
252
253
254
255
  	ret += sprintf(ret + page, "
  ");
  	return ret;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
/* Read-only sysfs attributes of a software queue (blk_mq_ctx). */
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

/* Attribute group installed on each ctx kobject (blk_mq_ctx_ktype). */
static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

/* Read-only sysfs attributes of a hardware queue (blk_mq_hw_ctx). */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
0d2602ca3   Jens Axboe   blk-mq: improve s...
293
294
295
296
/* "active": current in-flight request count for the hardware queue. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
320ae51fe   Jens Axboe   blk-mq: new multi...
297
298
299
300
/* "pending": dump of the hardware queue's dispatch list. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
320ae51fe   Jens Axboe   blk-mq: new multi...
301
302
303
304
/* "tags": tag-map state, formatted by blk_mq_tag_sysfs_show(). */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
676141e48   Jens Axboe   blk-mq: don't dum...
305
306
307
308
/* "cpu_list": CPUs mapped to this hardware queue. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};
05229beed   Jens Axboe   block: add block ...
309
310
311
312
/* "io_poll": poll invocation/success counters. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
	.attr = {.name = "io_poll", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_poll_show,
};
320ae51fe   Jens Axboe   blk-mq: new multi...
313
314
315
316
317
318
  
  static struct attribute *default_hw_ctx_attrs[] = {
  	&blk_mq_hw_sysfs_queued.attr,
  	&blk_mq_hw_sysfs_run.attr,
  	&blk_mq_hw_sysfs_dispatched.attr,
  	&blk_mq_hw_sysfs_pending.attr,
320ae51fe   Jens Axboe   blk-mq: new multi...
319
  	&blk_mq_hw_sysfs_tags.attr,
676141e48   Jens Axboe   blk-mq: don't dum...
320
  	&blk_mq_hw_sysfs_cpus.attr,
0d2602ca3   Jens Axboe   blk-mq: improve s...
321
  	&blk_mq_hw_sysfs_active.attr,
05229beed   Jens Axboe   block: add block ...
322
  	&blk_mq_hw_sysfs_poll.attr,
320ae51fe   Jens Axboe   blk-mq: new multi...
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
  	NULL,
  };
  
/* sysfs_ops routing ctx attribute accesses to the dispatchers above. */
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

/* sysfs_ops routing hctx attribute accesses to the hw dispatchers above. */
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

/* ktype of the queue-level "mq" directory kobject (no default attrs). */
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};
  
  static struct kobj_type blk_mq_ctx_ktype = {
  	.sysfs_ops	= &blk_mq_sysfs_ops,
  	.default_attrs	= default_ctx_attrs,
74170118b   Ming Lei   Revert "blk-mq: f...
344
  	.release	= blk_mq_sysfs_release,
320ae51fe   Jens Axboe   blk-mq: new multi...
345
346
347
348
349
  };
  
  static struct kobj_type blk_mq_hw_ktype = {
  	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
  	.default_attrs	= default_hw_ctx_attrs,
74170118b   Ming Lei   Revert "blk-mq: f...
350
  	.release	= blk_mq_sysfs_release,
320ae51fe   Jens Axboe   blk-mq: new multi...
351
  };
ee3c5db08   Fengguang Wu   blk-mq: blk_mq_un...
352
  static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
353
354
355
  {
  	struct blk_mq_ctx *ctx;
  	int i;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
356
  	if (!hctx->nr_ctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
357
358
359
360
361
362
363
  		return;
  
  	hctx_for_each_ctx(hctx, ctx, i)
  		kobject_del(&ctx->kobj);
  
  	kobject_del(&hctx->kobj);
  }
ee3c5db08   Fengguang Wu   blk-mq: blk_mq_un...
364
  static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
365
366
367
368
  {
  	struct request_queue *q = hctx->queue;
  	struct blk_mq_ctx *ctx;
  	int i, ret;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
369
  	if (!hctx->nr_ctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
370
371
372
373
374
375
376
377
378
379
380
381
382
383
  		return 0;
  
  	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
  	if (ret)
  		return ret;
  
  	hctx_for_each_ctx(hctx, ctx, i) {
  		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
  		if (ret)
  			break;
  	}
  
  	return ret;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
384
385
386
  void blk_mq_unregister_disk(struct gendisk *disk)
  {
  	struct request_queue *q = disk->queue;
851573660   Andrey Vagin   block: fix memory...
387
388
389
  	struct blk_mq_hw_ctx *hctx;
  	struct blk_mq_ctx *ctx;
  	int i, j;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
390
  	blk_mq_disable_hotplug();
851573660   Andrey Vagin   block: fix memory...
391
  	queue_for_each_hw_ctx(q, hctx, i) {
67aec14ce   Jens Axboe   blk-mq: make the ...
392
393
394
  		blk_mq_unregister_hctx(hctx);
  
  		hctx_for_each_ctx(hctx, ctx, j)
851573660   Andrey Vagin   block: fix memory...
395
  			kobject_put(&ctx->kobj);
67aec14ce   Jens Axboe   blk-mq: make the ...
396

851573660   Andrey Vagin   block: fix memory...
397
398
  		kobject_put(&hctx->kobj);
  	}
320ae51fe   Jens Axboe   blk-mq: new multi...
399
400
401
  
  	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
  	kobject_del(&q->mq_kobj);
851573660   Andrey Vagin   block: fix memory...
402
  	kobject_put(&q->mq_kobj);
320ae51fe   Jens Axboe   blk-mq: new multi...
403
404
  
  	kobject_put(&disk_to_dev(disk)->kobj);
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
405
406
407
  
  	q->mq_sysfs_init_done = false;
  	blk_mq_enable_hotplug();
320ae51fe   Jens Axboe   blk-mq: new multi...
408
  }
868f2f0b7   Keith Busch   blk-mq: dynamic h...
409
410
411
412
/* Initialize a hardware queue's embedded kobject with the hctx ktype. */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
67aec14ce   Jens Axboe   blk-mq: make the ...
413
414
  static void blk_mq_sysfs_init(struct request_queue *q)
  {
67aec14ce   Jens Axboe   blk-mq: make the ...
415
  	struct blk_mq_ctx *ctx;
897bb0c7f   Thomas Gleixner   blk-mq: Use prope...
416
  	int cpu;
67aec14ce   Jens Axboe   blk-mq: make the ...
417
418
  
  	kobject_init(&q->mq_kobj, &blk_mq_ktype);
897bb0c7f   Thomas Gleixner   blk-mq: Use prope...
419
420
  	for_each_possible_cpu(cpu) {
  		ctx = per_cpu_ptr(q->queue_ctx, cpu);
06a41a99d   Takashi Iwai   blk-mq: Fix unini...
421
  		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
897bb0c7f   Thomas Gleixner   blk-mq: Use prope...
422
  	}
67aec14ce   Jens Axboe   blk-mq: make the ...
423
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
424
425
426
427
428
  int blk_mq_register_disk(struct gendisk *disk)
  {
  	struct device *dev = disk_to_dev(disk);
  	struct request_queue *q = disk->queue;
  	struct blk_mq_hw_ctx *hctx;
67aec14ce   Jens Axboe   blk-mq: make the ...
429
  	int ret, i;
320ae51fe   Jens Axboe   blk-mq: new multi...
430

4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
431
  	blk_mq_disable_hotplug();
67aec14ce   Jens Axboe   blk-mq: make the ...
432
  	blk_mq_sysfs_init(q);
320ae51fe   Jens Axboe   blk-mq: new multi...
433
434
435
  
  	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
  	if (ret < 0)
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
436
  		goto out;
320ae51fe   Jens Axboe   blk-mq: new multi...
437
438
439
440
  
  	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
  
  	queue_for_each_hw_ctx(q, hctx, i) {
67aec14ce   Jens Axboe   blk-mq: make the ...
441
  		ret = blk_mq_register_hctx(hctx);
320ae51fe   Jens Axboe   blk-mq: new multi...
442
443
  		if (ret)
  			break;
320ae51fe   Jens Axboe   blk-mq: new multi...
444
  	}
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
445
  	if (ret)
320ae51fe   Jens Axboe   blk-mq: new multi...
446
  		blk_mq_unregister_disk(disk);
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
447
448
449
450
  	else
  		q->mq_sysfs_init_done = true;
  out:
  	blk_mq_enable_hotplug();
320ae51fe   Jens Axboe   blk-mq: new multi...
451

4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
452
  	return ret;
320ae51fe   Jens Axboe   blk-mq: new multi...
453
  }
b62c21b71   Mike Snitzer   blk-mq: add blk_m...
454
  EXPORT_SYMBOL_GPL(blk_mq_register_disk);
67aec14ce   Jens Axboe   blk-mq: make the ...
455
456
457
458
459
  
  void blk_mq_sysfs_unregister(struct request_queue *q)
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
460
461
  	if (!q->mq_sysfs_init_done)
  		return;
67aec14ce   Jens Axboe   blk-mq: make the ...
462
463
464
465
466
467
468
469
  	queue_for_each_hw_ctx(q, hctx, i)
  		blk_mq_unregister_hctx(hctx);
  }
  
  int blk_mq_sysfs_register(struct request_queue *q)
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i, ret = 0;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
470
471
  	if (!q->mq_sysfs_init_done)
  		return ret;
67aec14ce   Jens Axboe   blk-mq: make the ...
472
473
474
475
476
477
478
479
  	queue_for_each_hw_ctx(q, hctx, i) {
  		ret = blk_mq_register_hctx(hctx);
  		if (ret)
  			break;
  	}
  
  	return ret;
  }