block/blk-sysfs.c
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"
#include "blk-cgroup.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = strict_strtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
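
/*
 * Illustrative note: queue_var_show()/queue_var_store() are the common
 * back-ends for the simple numeric attributes. A write such as
 *
 *	echo 128 > /sys/block/sda/queue/nr_requests	(device name assumed)
 *
 * is parsed by queue_var_store(), which rejects values above UINT_MAX and
 * returns the byte count consumed on success, as sysfs store methods must.
 */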

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	/* congestion isn't cgroup aware and follows root blkcg for now */
	rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return ret;
}
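
/*
 * Illustrative note: after nr_requests is resized above, every request
 * list is re-checked against the new limit; lists that dropped below it
 * have their "full" flag cleared and any sleepers woken, so a smaller
 * queue depth takes effect without stranding waiting submitters.
 */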

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}
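
/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_CACHE_SHIFT is 12,
 * so the shift amount is 12 - 10 = 2. Writing 128 to read_ahead_kb stores
 * 128 >> 2 = 32 pages in ra_pages; reading converts back: 32 << 2 = 128 KiB.
 */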

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_show_##name(struct request_queue *q, char *page) \
{ \
	int bit; \
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
	return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{ \
	unsigned long val; \
	ssize_t ret; \
	ret = queue_var_store(&val, page, count); \
	if (ret < 0) \
		return ret; \
	if (neg) \
		val = !val; \
 \
	spin_lock_irq(q->queue_lock); \
	if (val) \
		queue_flag_set(QUEUE_FLAG_##flag, q); \
	else \
		queue_flag_clear(QUEUE_FLAG_##flag, q); \
	spin_unlock_irq(q->queue_lock); \
	return ret; \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
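
/*
 * Illustrative note: each QUEUE_SYSFS_BIT_FNS() invocation above expands to
 * a show/store pair for one queue flag. For example,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) generates queue_show_nonrot() and
 * queue_store_nonrot(), and because neg is 1 the "rotational" attribute
 * reads and writes the logical inverse of QUEUE_FLAG_NONROT: a rotational
 * device reports 1 while the NONROT flag is clear.
 */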

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
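
/*
 * Illustrative note: "nomerges" encodes two flags in one value. Writing 0
 * enables all merge heuristics, 1 sets QUEUE_FLAG_NOXMERGES (skip the more
 * expensive extended merge lookups), and 2 sets QUEUE_FLAG_NOMERGES
 * (disable merging entirely); the show side reconstructs the same 0/1/2
 * encoding from the two flag bits.
 */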

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
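
/*
 * Illustrative note: "rq_affinity" follows the same 0/1/2 scheme. Writing 1
 * steers a request's completion work toward the CPU group that submitted it
 * (QUEUE_FLAG_SAME_COMP); 2 additionally sets QUEUE_FLAG_SAME_FORCE to
 * force completion on the exact submitting CPU; 0 clears both. The show
 * side returns set << force, i.e. 2 only when both flags are set.
 */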

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};
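
/*
 * Illustrative note: adding a new queue attribute follows the pattern of
 * the entries above: define a show (and optionally store) helper, wrap it
 * in a queue_sysfs_entry carrying the sysfs file name and mode, and list
 * its .attr in default_attrs[] below so the kobject core creates the file.
 */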

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
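
/*
 * Illustrative note: both dispatchers take q->sysfs_lock and bail out with
 * -ENOENT once blk_queue_dying() is true, so an attribute handler never
 * runs against a queue that is being torn down, and shows/stores on a
 * given queue are serialized against each other.
 */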

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blk_sync_queue(q);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
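
/*
 * Illustrative note: the queue itself is not freed synchronously above;
 * call_rcu() defers the kmem_cache_free() to blk_free_queue_rcu() until
 * after a grace period, so code that still dereferences the queue under
 * rcu_read_lock() cannot see it freed out from under it.
 */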

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * Initialization must be complete by now. Finish the initial
	 * bypass from queue allocation.
	 */
	blk_queue_bypass_end(q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
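
/*
 * Illustrative note: blk_register_queue() is what makes the whole
 * /sys/block/<disk>/queue directory appear; it is normally invoked on the
 * driver's behalf when the disk is added (e.g. via add_disk()), and
 * blk_unregister_queue() below undoes each step in reverse on removal.
 */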

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}
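
/*
 * Illustrative usage (device name and output assumed, not taken from this
 * file): once a queue is registered, its tunables are regular sysfs files:
 *
 *	$ cat /sys/block/sda/queue/rotational
 *	1
 *	$ echo 0 > /sys/block/sda/queue/add_random
 *
 * Each such read or write lands in queue_attr_show()/queue_attr_store()
 * above and is dispatched to the matching queue_sysfs_entry handler.
 */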