Blame view

block/blk-mq-sysfs.c 11.7 KB
320ae51fe   Jens Axboe   blk-mq: new multi...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/backing-dev.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/mm.h>
  #include <linux/init.h>
  #include <linux/slab.h>
  #include <linux/workqueue.h>
  #include <linux/smp.h>
  
  #include <linux/blk-mq.h>
  #include "blk-mq.h"
  #include "blk-mq-tag.h"
  
  static void blk_mq_sysfs_release(struct kobject *kobj)
  {
  }
  
/*
 * sysfs attribute descriptor for a software context (blk_mq_ctx):
 * couples the generic attribute with typed show/store callbacks.
 * Either callback may be NULL, in which case the corresponding
 * operation returns -EIO (see blk_mq_sysfs_show/store).
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
  
/*
 * sysfs attribute descriptor for a hardware context (blk_mq_hw_ctx);
 * same contract as blk_mq_ctx_sysfs_entry but typed for hctx.
 */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
  
  static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
  				 char *page)
  {
  	struct blk_mq_ctx_sysfs_entry *entry;
  	struct blk_mq_ctx *ctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  	q = ctx->queue;
  
  	if (!entry->show)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->show(ctx, page);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
  				  const char *page, size_t length)
  {
  	struct blk_mq_ctx_sysfs_entry *entry;
  	struct blk_mq_ctx *ctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  	q = ctx->queue;
  
  	if (!entry->store)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->store(ctx, page, length);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
  				    struct attribute *attr, char *page)
  {
  	struct blk_mq_hw_ctx_sysfs_entry *entry;
  	struct blk_mq_hw_ctx *hctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  	q = hctx->queue;
  
  	if (!entry->show)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->show(hctx, page);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
  				     struct attribute *attr, const char *page,
  				     size_t length)
  {
  	struct blk_mq_hw_ctx_sysfs_entry *entry;
  	struct blk_mq_hw_ctx *hctx;
  	struct request_queue *q;
  	ssize_t res;
  
  	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  	q = hctx->queue;
  
  	if (!entry->store)
  		return -EIO;
  
  	res = -ENOENT;
  	mutex_lock(&q->sysfs_lock);
  	if (!blk_queue_dying(q))
  		res = entry->store(hctx, page, length);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
  {
  	return sprintf(page, "%lu %lu
  ", ctx->rq_dispatched[1],
  				ctx->rq_dispatched[0]);
  }
  
  static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
  {
  	return sprintf(page, "%lu
  ", ctx->rq_merged);
  }
  
  static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
  {
  	return sprintf(page, "%lu %lu
  ", ctx->rq_completed[1],
  				ctx->rq_completed[0]);
  }
  
  static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
  {
320ae51fe   Jens Axboe   blk-mq: new multi...
147
  	struct request *rq;
596f5aad2   Ming Lei   blk-mq: fix buffe...
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
  	int len = snprintf(page, PAGE_SIZE - 1, "%s:
  ", msg);
  
  	list_for_each_entry(rq, list, queuelist) {
  		const int rq_len = 2 * sizeof(rq) + 2;
  
  		/* if the output will be truncated */
  		if (PAGE_SIZE - 1 < len + rq_len) {
  			/* backspacing if it can't hold '\t...
  ' */
  			if (PAGE_SIZE - 1 < len + 5)
  				len -= rq_len;
  			len += snprintf(page + len, PAGE_SIZE - 1 - len,
  					"\t...
  ");
  			break;
  		}
  		len += snprintf(page + len, PAGE_SIZE - 1 - len,
  				"\t%p
  ", rq);
  	}
  
  	return len;
320ae51fe   Jens Axboe   blk-mq: new multi...
171
172
173
174
175
176
177
178
179
180
181
182
  }
  
  static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
  {
  	ssize_t ret;
  
  	spin_lock(&ctx->lock);
  	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
  	spin_unlock(&ctx->lock);
  
  	return ret;
  }
05229beed   Jens Axboe   block: add block ...
183
184
  static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
6e219353a   Stephen Bates   block: add poll_c...
185
186
187
188
  	return sprintf(page, "considered=%lu, invoked=%lu, success=%lu
  ",
  		       hctx->poll_considered, hctx->poll_invoked,
  		       hctx->poll_success);
05229beed   Jens Axboe   block: add block ...
189
  }
d21ea4bc0   Stephen Bates   block: enable zer...
190
191
192
193
194
195
196
  static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
  					  const char *page, size_t size)
  {
  	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
  
  	return size;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
  static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
  					   char *page)
  {
  	return sprintf(page, "%lu
  ", hctx->queued);
  }
  
  static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
  	return sprintf(page, "%lu
  ", hctx->run);
  }
  
  static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
  					       char *page)
  {
  	char *start_page = page;
  	int i;
  
  	page += sprintf(page, "%8u\t%lu
  ", 0U, hctx->dispatched[0]);
703fd1c0f   Jens Axboe   blk-mq: account h...
218
219
  	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
  		unsigned int d = 1U << (i - 1);
320ae51fe   Jens Axboe   blk-mq: new multi...
220

703fd1c0f   Jens Axboe   blk-mq: account h...
221
222
  		page += sprintf(page, "%8u\t%lu
  ", d, hctx->dispatched[i]);
320ae51fe   Jens Axboe   blk-mq: new multi...
223
  	}
703fd1c0f   Jens Axboe   blk-mq: account h...
224
225
226
  	page += sprintf(page, "%8u+\t%lu
  ", 1U << (i - 1),
  						hctx->dispatched[i]);
320ae51fe   Jens Axboe   blk-mq: new multi...
227
228
229
230
231
232
233
234
235
236
237
238
239
240
  	return page - start_page;
  }
  
  static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
  					    char *page)
  {
  	ssize_t ret;
  
  	spin_lock(&hctx->lock);
  	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
  	spin_unlock(&hctx->lock);
  
  	return ret;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
241
242
243
244
/* "tags" (per hw ctx): delegate formatting to the tag-map helper. */
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}
0d2602ca3   Jens Axboe   blk-mq: improve s...
245
246
247
248
249
  static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
  	return sprintf(page, "%u
  ", atomic_read(&hctx->nr_active));
  }
676141e48   Jens Axboe   blk-mq: don't dum...
250
251
  static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
  {
cb2da43e3   Jens Axboe   blk-mq: simplify ...
252
  	unsigned int i, first = 1;
676141e48   Jens Axboe   blk-mq: don't dum...
253
  	ssize_t ret = 0;
cb2da43e3   Jens Axboe   blk-mq: simplify ...
254
  	for_each_cpu(i, hctx->cpumask) {
676141e48   Jens Axboe   blk-mq: don't dum...
255
256
257
258
259
260
261
  		if (first)
  			ret += sprintf(ret + page, "%u", i);
  		else
  			ret += sprintf(ret + page, ", %u", i);
  
  		first = 0;
  	}
676141e48   Jens Axboe   blk-mq: don't dum...
262
263
264
265
  	ret += sprintf(ret + page, "
  ");
  	return ret;
  }
320ae51fe   Jens Axboe   blk-mq: new multi...
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
/* Per-software-context (mq/<n>/cpu<m>/) attributes, all read-only. */
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

/* Default attribute set installed via blk_mq_ctx_ktype. */
static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};
  
/* Per-hardware-context (mq/<n>/) read-only statistics attributes. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
0d2602ca3   Jens Axboe   blk-mq: improve s...
303
304
305
306
/* "active": read-only view of the hctx nr_active counter. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
320ae51fe   Jens Axboe   blk-mq: new multi...
307
308
309
310
/* "pending": read-only dump of the hctx dispatch list. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
320ae51fe   Jens Axboe   blk-mq: new multi...
311
312
313
314
/* "tags": read-only dump of the hctx tag map. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
676141e48   Jens Axboe   blk-mq: don't dum...
315
316
317
318
/* "cpu_list": read-only list of CPUs mapped to this hctx. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};
05229beed   Jens Axboe   block: add block ...
319
  static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
d21ea4bc0   Stephen Bates   block: enable zer...
320
  	.attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
05229beed   Jens Axboe   block: add block ...
321
  	.show = blk_mq_hw_sysfs_poll_show,
d21ea4bc0   Stephen Bates   block: enable zer...
322
  	.store = blk_mq_hw_sysfs_poll_store,
05229beed   Jens Axboe   block: add block ...
323
  };
320ae51fe   Jens Axboe   blk-mq: new multi...
324
325
326
327
328
329
  
  static struct attribute *default_hw_ctx_attrs[] = {
  	&blk_mq_hw_sysfs_queued.attr,
  	&blk_mq_hw_sysfs_run.attr,
  	&blk_mq_hw_sysfs_dispatched.attr,
  	&blk_mq_hw_sysfs_pending.attr,
320ae51fe   Jens Axboe   blk-mq: new multi...
330
  	&blk_mq_hw_sysfs_tags.attr,
676141e48   Jens Axboe   blk-mq: don't dum...
331
  	&blk_mq_hw_sysfs_cpus.attr,
0d2602ca3   Jens Axboe   blk-mq: improve s...
332
  	&blk_mq_hw_sysfs_active.attr,
05229beed   Jens Axboe   block: add block ...
333
  	&blk_mq_hw_sysfs_poll.attr,
320ae51fe   Jens Axboe   blk-mq: new multi...
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
  	NULL,
  };
  
  static const struct sysfs_ops blk_mq_sysfs_ops = {
  	.show	= blk_mq_sysfs_show,
  	.store	= blk_mq_sysfs_store,
  };
  
  static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
  	.show	= blk_mq_hw_sysfs_show,
  	.store	= blk_mq_hw_sysfs_store,
  };
  
  static struct kobj_type blk_mq_ktype = {
  	.sysfs_ops	= &blk_mq_sysfs_ops,
  	.release	= blk_mq_sysfs_release,
  };
  
  static struct kobj_type blk_mq_ctx_ktype = {
  	.sysfs_ops	= &blk_mq_sysfs_ops,
  	.default_attrs	= default_ctx_attrs,
74170118b   Ming Lei   Revert "blk-mq: f...
355
  	.release	= blk_mq_sysfs_release,
320ae51fe   Jens Axboe   blk-mq: new multi...
356
357
358
359
360
  };
  
  static struct kobj_type blk_mq_hw_ktype = {
  	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
  	.default_attrs	= default_hw_ctx_attrs,
74170118b   Ming Lei   Revert "blk-mq: f...
361
  	.release	= blk_mq_sysfs_release,
320ae51fe   Jens Axboe   blk-mq: new multi...
362
  };
ee3c5db08   Fengguang Wu   blk-mq: blk_mq_un...
363
  static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
364
365
366
  {
  	struct blk_mq_ctx *ctx;
  	int i;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
367
  	if (!hctx->nr_ctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
368
369
370
371
372
373
374
  		return;
  
  	hctx_for_each_ctx(hctx, ctx, i)
  		kobject_del(&ctx->kobj);
  
  	kobject_del(&hctx->kobj);
  }
ee3c5db08   Fengguang Wu   blk-mq: blk_mq_un...
375
  static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
376
377
378
379
  {
  	struct request_queue *q = hctx->queue;
  	struct blk_mq_ctx *ctx;
  	int i, ret;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
380
  	if (!hctx->nr_ctx)
67aec14ce   Jens Axboe   blk-mq: make the ...
381
382
383
384
385
386
387
388
389
390
391
392
393
394
  		return 0;
  
  	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
  	if (ret)
  		return ret;
  
  	hctx_for_each_ctx(hctx, ctx, i) {
  		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
  		if (ret)
  			break;
  	}
  
  	return ret;
  }
b21d5b301   Matias Bjørling   blk-mq: register ...
395
  static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
320ae51fe   Jens Axboe   blk-mq: new multi...
396
  {
851573660   Andrey Vagin   block: fix memory...
397
398
399
400
401
  	struct blk_mq_hw_ctx *hctx;
  	struct blk_mq_ctx *ctx;
  	int i, j;
  
  	queue_for_each_hw_ctx(q, hctx, i) {
67aec14ce   Jens Axboe   blk-mq: make the ...
402
403
404
  		blk_mq_unregister_hctx(hctx);
  
  		hctx_for_each_ctx(hctx, ctx, j)
851573660   Andrey Vagin   block: fix memory...
405
  			kobject_put(&ctx->kobj);
67aec14ce   Jens Axboe   blk-mq: make the ...
406

851573660   Andrey Vagin   block: fix memory...
407
408
  		kobject_put(&hctx->kobj);
  	}
320ae51fe   Jens Axboe   blk-mq: new multi...
409
410
411
  
  	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
  	kobject_del(&q->mq_kobj);
851573660   Andrey Vagin   block: fix memory...
412
  	kobject_put(&q->mq_kobj);
320ae51fe   Jens Axboe   blk-mq: new multi...
413

b21d5b301   Matias Bjørling   blk-mq: register ...
414
  	kobject_put(&dev->kobj);
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
415
416
  
  	q->mq_sysfs_init_done = false;
c0f3fd2b3   Jens Axboe   blk-mq: fix deadl...
417
  }
b21d5b301   Matias Bjørling   blk-mq: register ...
418
  void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
c0f3fd2b3   Jens Axboe   blk-mq: fix deadl...
419
420
  {
  	blk_mq_disable_hotplug();
b21d5b301   Matias Bjørling   blk-mq: register ...
421
  	__blk_mq_unregister_dev(dev, q);
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
422
  	blk_mq_enable_hotplug();
320ae51fe   Jens Axboe   blk-mq: new multi...
423
  }
868f2f0b7   Keith Busch   blk-mq: dynamic h...
424
425
426
427
/* Initialise an hctx kobject with the hw ktype; must precede kobject_add(). */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
67aec14ce   Jens Axboe   blk-mq: make the ...
428
429
  static void blk_mq_sysfs_init(struct request_queue *q)
  {
67aec14ce   Jens Axboe   blk-mq: make the ...
430
  	struct blk_mq_ctx *ctx;
897bb0c7f   Thomas Gleixner   blk-mq: Use prope...
431
  	int cpu;
67aec14ce   Jens Axboe   blk-mq: make the ...
432
433
  
  	kobject_init(&q->mq_kobj, &blk_mq_ktype);
897bb0c7f   Thomas Gleixner   blk-mq: Use prope...
434
435
  	for_each_possible_cpu(cpu) {
  		ctx = per_cpu_ptr(q->queue_ctx, cpu);
06a41a99d   Takashi Iwai   blk-mq: Fix unini...
436
  		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
897bb0c7f   Thomas Gleixner   blk-mq: Use prope...
437
  	}
67aec14ce   Jens Axboe   blk-mq: make the ...
438
  }
b21d5b301   Matias Bjørling   blk-mq: register ...
439
  int blk_mq_register_dev(struct device *dev, struct request_queue *q)
320ae51fe   Jens Axboe   blk-mq: new multi...
440
  {
320ae51fe   Jens Axboe   blk-mq: new multi...
441
  	struct blk_mq_hw_ctx *hctx;
67aec14ce   Jens Axboe   blk-mq: make the ...
442
  	int ret, i;
320ae51fe   Jens Axboe   blk-mq: new multi...
443

4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
444
  	blk_mq_disable_hotplug();
67aec14ce   Jens Axboe   blk-mq: make the ...
445
  	blk_mq_sysfs_init(q);
320ae51fe   Jens Axboe   blk-mq: new multi...
446
447
448
  
  	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
  	if (ret < 0)
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
449
  		goto out;
320ae51fe   Jens Axboe   blk-mq: new multi...
450
451
452
453
  
  	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
  
  	queue_for_each_hw_ctx(q, hctx, i) {
67aec14ce   Jens Axboe   blk-mq: make the ...
454
  		ret = blk_mq_register_hctx(hctx);
320ae51fe   Jens Axboe   blk-mq: new multi...
455
456
  		if (ret)
  			break;
320ae51fe   Jens Axboe   blk-mq: new multi...
457
  	}
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
458
  	if (ret)
b21d5b301   Matias Bjørling   blk-mq: register ...
459
  		__blk_mq_unregister_dev(dev, q);
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
460
461
462
463
  	else
  		q->mq_sysfs_init_done = true;
  out:
  	blk_mq_enable_hotplug();
320ae51fe   Jens Axboe   blk-mq: new multi...
464

4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
465
  	return ret;
320ae51fe   Jens Axboe   blk-mq: new multi...
466
  }
b21d5b301   Matias Bjørling   blk-mq: register ...
467
  EXPORT_SYMBOL_GPL(blk_mq_register_dev);
67aec14ce   Jens Axboe   blk-mq: make the ...
468
469
470
471
472
  
  void blk_mq_sysfs_unregister(struct request_queue *q)
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
473
474
  	if (!q->mq_sysfs_init_done)
  		return;
67aec14ce   Jens Axboe   blk-mq: make the ...
475
476
477
478
479
480
481
482
  	queue_for_each_hw_ctx(q, hctx, i)
  		blk_mq_unregister_hctx(hctx);
  }
  
  int blk_mq_sysfs_register(struct request_queue *q)
  {
  	struct blk_mq_hw_ctx *hctx;
  	int i, ret = 0;
4593fdbe7   Akinobu Mita   blk-mq: fix sysfs...
483
484
  	if (!q->mq_sysfs_init_done)
  		return ret;
67aec14ce   Jens Axboe   blk-mq: make the ...
485
486
487
488
489
490
491
492
  	queue_for_each_hw_ctx(q, hctx, i) {
  		ret = blk_mq_register_hctx(hctx);
  		if (ret)
  			break;
  	}
  
  	return ret;
  }