Blame view
block/blk-sysfs.c
26.4 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
8324aa91d block: split tag ... |
2 3 4 5 |
/* * Functions related to sysfs handling */ #include <linux/kernel.h> |
5a0e3ad6a include cleanup: ... |
6 |
#include <linux/slab.h> |
8324aa91d block: split tag ... |
7 8 9 |
#include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> |
66114cad6 writeback: separa... |
10 |
#include <linux/backing-dev.h> |
8324aa91d block: split tag ... |
11 |
#include <linux/blktrace_api.h> |
320ae51fe blk-mq: new multi... |
12 |
#include <linux/blk-mq.h> |
eea8f41cc blkcg: move block... |
13 |
#include <linux/blk-cgroup.h> |
8324aa91d block: split tag ... |
14 15 |
#include "blk.h" |
3edcc0ce8 block: blk-mq: do... |
16 |
#include "blk-mq.h" |
d173a2516 blk-mq: move debu... |
17 |
#include "blk-mq-debugfs.h" |
87760e5ee block: hook up wr... |
18 |
#include "blk-wbt.h" |
8324aa91d block: split tag ... |
19 20 21 22 23 24 25 26 |
/*
 * One sysfs attribute of a request queue: its name/mode plus optional
 * show/store callbacks that operate on the owning request_queue.
 */
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

/* Format an unsigned long for a sysfs read; newline-terminated per sysfs convention. */
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

/*
 * Parse an unsigned decimal value from a sysfs write.  Values above
 * UINT_MAX are rejected because callers store the result into 32-bit
 * wide queue fields.  Returns @count on success, -EINVAL on bad input.
 */
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
80e091d10 blk-wbt: allow re... |
46 |
static ssize_t queue_var_store64(s64 *var, const char *page) |
87760e5ee block: hook up wr... |
47 48 |
{ int err; |
80e091d10 blk-wbt: allow re... |
49 |
s64 v; |
87760e5ee block: hook up wr... |
50 |
|
80e091d10 blk-wbt: allow re... |
51 |
err = kstrtos64(page, 10, &v); |
87760e5ee block: hook up wr... |
52 53 54 55 56 57 |
if (err < 0) return err; *var = v; return 0; } |
8324aa91d block: split tag ... |
58 59 60 61 62 63 64 65 |
static ssize_t queue_requests_show(struct request_queue *q, char *page) { return queue_var_show(q->nr_requests, (page)); } static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { |
8324aa91d block: split tag ... |
66 |
unsigned long nr; |
e3a2b3f93 blk-mq: allow cha... |
67 |
int ret, err; |
b8a9ae779 block: don't assu... |
68 |
|
344e9ffcb block: add queue_... |
69 |
if (!queue_is_mq(q)) |
b8a9ae779 block: don't assu... |
70 71 72 |
return -EINVAL; ret = queue_var_store(&nr, page, count); |
b1f3b64d7 block: reject inv... |
73 74 |
if (ret < 0) return ret; |
8324aa91d block: split tag ... |
75 76 |
if (nr < BLKDEV_MIN_RQ) nr = BLKDEV_MIN_RQ; |
a1ce35fa4 block: remove dea... |
77 |
err = blk_mq_update_nr_requests(q, nr); |
e3a2b3f93 blk-mq: allow cha... |
78 79 |
if (err) return err; |
8324aa91d block: split tag ... |
80 81 82 83 84 |
return ret; } static ssize_t queue_ra_show(struct request_queue *q, char *page) { |
dc3b17cc8 block: Use pointe... |
85 |
unsigned long ra_kb = q->backing_dev_info->ra_pages << |
09cbfeaf1 mm, fs: get rid o... |
86 |
(PAGE_SHIFT - 10); |
8324aa91d block: split tag ... |
87 88 89 90 91 92 93 94 95 |
return queue_var_show(ra_kb, (page)); } static ssize_t queue_ra_store(struct request_queue *q, const char *page, size_t count) { unsigned long ra_kb; ssize_t ret = queue_var_store(&ra_kb, page, count); |
b1f3b64d7 block: reject inv... |
96 97 |
if (ret < 0) return ret; |
dc3b17cc8 block: Use pointe... |
98 |
q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); |
8324aa91d block: split tag ... |
99 100 101 102 103 104 |
return ret; } static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) { |
ae03bf639 block: Use access... |
105 |
int max_sectors_kb = queue_max_sectors(q) >> 1; |
8324aa91d block: split tag ... |
106 107 108 |
return queue_var_show(max_sectors_kb, (page)); } |
c77a5710b block: Export max... |
109 110 111 112 |
static ssize_t queue_max_segments_show(struct request_queue *q, char *page) { return queue_var_show(queue_max_segments(q), (page)); } |
1e739730c block: optionally... |
113 114 115 116 117 |
static ssize_t queue_max_discard_segments_show(struct request_queue *q, char *page) { return queue_var_show(queue_max_discard_segments(q), (page)); } |
13f05c8d8 block/scsi: Provi... |
118 119 120 121 |
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) { return queue_var_show(q->limits.max_integrity_segments, (page)); } |
c77a5710b block: Export max... |
122 123 |
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) { |
38417468d scsi: block: remo... |
124 |
return queue_var_show(queue_max_segment_size(q), (page)); |
c77a5710b block: Export max... |
125 |
} |
e1defc4ff block: Do away wi... |
126 |
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) |
e68b903c6 Expose hardware s... |
127 |
{ |
e1defc4ff block: Do away wi... |
128 |
return queue_var_show(queue_logical_block_size(q), page); |
e68b903c6 Expose hardware s... |
129 |
} |
c72758f33 block: Export I/O... |
130 131 132 133 |
static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) { return queue_var_show(queue_physical_block_size(q), page); } |
87caf97cf blk-sysfs: Add 'c... |
134 135 136 137 |
static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) { return queue_var_show(q->limits.chunk_sectors, page); } |
c72758f33 block: Export I/O... |
138 139 140 141 142 143 144 145 |
static ssize_t queue_io_min_show(struct request_queue *q, char *page) { return queue_var_show(queue_io_min(q), page); } static ssize_t queue_io_opt_show(struct request_queue *q, char *page) { return queue_var_show(queue_io_opt(q), page); |
e68b903c6 Expose hardware s... |
146 |
} |
86b372814 block: Expose dis... |
147 148 149 150 |
static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) { return queue_var_show(q->limits.discard_granularity, page); } |
0034af036 block: make /sys/... |
151 152 |
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) { |
0034af036 block: make /sys/... |
153 |
|
18f922d03 blk: fix overflow... |
154 155 156 |
return sprintf(page, "%llu ", (unsigned long long)q->limits.max_hw_discard_sectors << 9); |
0034af036 block: make /sys/... |
157 |
} |
86b372814 block: Expose dis... |
158 159 |
static ssize_t queue_discard_max_show(struct request_queue *q, char *page) { |
a934a00a6 block: Fix discar... |
160 161 162 |
return sprintf(page, "%llu ", (unsigned long long)q->limits.max_discard_sectors << 9); |
86b372814 block: Expose dis... |
163 |
} |
0034af036 block: make /sys/... |
164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 |
static ssize_t queue_discard_max_store(struct request_queue *q, const char *page, size_t count) { unsigned long max_discard; ssize_t ret = queue_var_store(&max_discard, page, count); if (ret < 0) return ret; if (max_discard & (q->limits.discard_granularity - 1)) return -EINVAL; max_discard >>= 9; if (max_discard > UINT_MAX) return -EINVAL; if (max_discard > q->limits.max_hw_discard_sectors) max_discard = q->limits.max_hw_discard_sectors; q->limits.max_discard_sectors = max_discard; return ret; } |
98262f276 block: Allow devi... |
186 187 |
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) { |
48920ff2a block: remove the... |
188 |
return queue_var_show(0, page); |
98262f276 block: Allow devi... |
189 |
} |
4363ac7c1 block: Implement ... |
190 191 192 193 194 195 |
static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) { return sprintf(page, "%llu ", (unsigned long long)q->limits.max_write_same_sectors << 9); } |
a6f0788ec block: add suppor... |
196 197 198 199 200 201 |
static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) { return sprintf(page, "%llu ", (unsigned long long)q->limits.max_write_zeroes_sectors << 9); } |
4363ac7c1 block: Implement ... |
202 |
|
8324aa91d block: split tag ... |
203 204 205 206 |
static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) { unsigned long max_sectors_kb, |
ae03bf639 block: Use access... |
207 |
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, |
09cbfeaf1 mm, fs: get rid o... |
208 |
page_kb = 1 << (PAGE_SHIFT - 10); |
8324aa91d block: split tag ... |
209 |
ssize_t ret = queue_var_store(&max_sectors_kb, page, count); |
b1f3b64d7 block: reject inv... |
210 211 |
if (ret < 0) return ret; |
ca369d51b block/sd: Fix dev... |
212 213 |
max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long) q->limits.max_dev_sectors >> 1); |
8324aa91d block: split tag ... |
214 215 |
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) return -EINVAL; |
7c239517d block: don't take... |
216 |
|
0d945c1f9 block: remove the... |
217 |
spin_lock_irq(&q->queue_lock); |
c295fc057 block: Allow chan... |
218 |
q->limits.max_sectors = max_sectors_kb << 1; |
dc3b17cc8 block: Use pointe... |
219 |
q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); |
0d945c1f9 block: remove the... |
220 |
spin_unlock_irq(&q->queue_lock); |
8324aa91d block: split tag ... |
221 222 223 224 225 226 |
return ret; } static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) { |
ae03bf639 block: Use access... |
227 |
int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; |
8324aa91d block: split tag ... |
228 229 230 |
return queue_var_show(max_hw_sectors_kb, (page)); } |
956bcb7c1 block: add helper... |
231 232 233 234 235 236 237 238 239 240 241 242 243 244 |
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ static ssize_t \ queue_show_##name(struct request_queue *q, char *page) \ { \ int bit; \ bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ return queue_var_show(neg ? !bit : bit, page); \ } \ static ssize_t \ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ { \ unsigned long val; \ ssize_t ret; \ ret = queue_var_store(&val, page, count); \ |
c678ef528 block: avoid usin... |
245 246 |
if (ret < 0) \ return ret; \ |
956bcb7c1 block: add helper... |
247 248 249 |
if (neg) \ val = !val; \ \ |
956bcb7c1 block: add helper... |
250 |
if (val) \ |
8814ce8a0 block: Introduce ... |
251 |
blk_queue_flag_set(QUEUE_FLAG_##flag, q); \ |
956bcb7c1 block: add helper... |
252 |
else \ |
8814ce8a0 block: Introduce ... |
253 |
blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \ |
956bcb7c1 block: add helper... |
254 |
return ret; \ |
1308835ff block: export SSD... |
255 |
} |
956bcb7c1 block: add helper... |
256 257 258 259 |
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); #undef QUEUE_SYSFS_BIT_FNS |
1308835ff block: export SSD... |
260 |
|
797476b88 block: Add 'zoned... |
261 262 263 264 265 266 267 268 269 270 271 272 273 274 |
static ssize_t queue_zoned_show(struct request_queue *q, char *page) { switch (blk_queue_zoned_model(q)) { case BLK_ZONED_HA: return sprintf(page, "host-aware "); case BLK_ZONED_HM: return sprintf(page, "host-managed "); default: return sprintf(page, "none "); } } |
965b652e9 block: Expose que... |
275 276 277 278 |
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) { return queue_var_show(blk_queue_nr_zones(q), page); } |
/*
 * Report the merge policy as an integer: 0 = merges enabled,
 * 1 = only simple one-hit-cache merges (NOXMERGES), 2 = no merges at all.
 */
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

/* Set the merge policy; both flags are cleared first, then re-set per @nm. */
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
c7c22e4d5 block: add suppor... |
301 302 |
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) { |
9cb308ce8 block: sysfs fix ... |
303 |
bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); |
5757a6d76 block: strict rq_... |
304 |
bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); |
c7c22e4d5 block: add suppor... |
305 |
|
5757a6d76 block: strict rq_... |
306 |
return queue_var_show(set << force, page); |
c7c22e4d5 block: add suppor... |
307 308 309 310 311 312 |
} static ssize_t queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) { ssize_t ret = -EINVAL; |
0a06ff068 kernel: remove CO... |
313 |
#ifdef CONFIG_SMP |
c7c22e4d5 block: add suppor... |
314 315 316 |
unsigned long val; ret = queue_var_store(&val, page, count); |
b1f3b64d7 block: reject inv... |
317 318 |
if (ret < 0) return ret; |
e8037d498 block: Fix queue_... |
319 |
if (val == 2) { |
57d74df90 block: use atomic... |
320 321 |
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); |
e8037d498 block: Fix queue_... |
322 |
} else if (val == 1) { |
57d74df90 block: use atomic... |
323 324 |
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q); blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); |
e8037d498 block: Fix queue_... |
325 |
} else if (val == 0) { |
57d74df90 block: use atomic... |
326 327 |
blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); |
5757a6d76 block: strict rq_... |
328 |
} |
c7c22e4d5 block: add suppor... |
329 330 331 |
#endif return ret; } |
8324aa91d block: split tag ... |
332 |
|
06426adf0 blk-mq: implement... |
333 334 |
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) { |
64f1c21e8 blk-mq: make the ... |
335 |
int val; |
29ece8b43 block: add BLK_MQ... |
336 337 |
if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) val = BLK_MQ_POLL_CLASSIC; |
64f1c21e8 blk-mq: make the ... |
338 339 340 341 342 |
else val = q->poll_nsec / 1000; return sprintf(page, "%d ", val); |
06426adf0 blk-mq: implement... |
343 344 345 346 347 |
} static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, size_t count) { |
64f1c21e8 blk-mq: make the ... |
348 |
int err, val; |
06426adf0 blk-mq: implement... |
349 350 351 |
if (!q->mq_ops || !q->mq_ops->poll) return -EINVAL; |
64f1c21e8 blk-mq: make the ... |
352 353 354 |
err = kstrtoint(page, 10, &val); if (err < 0) return err; |
06426adf0 blk-mq: implement... |
355 |
|
29ece8b43 block: add BLK_MQ... |
356 357 358 |
if (val == BLK_MQ_POLL_CLASSIC) q->poll_nsec = BLK_MQ_POLL_CLASSIC; else if (val >= 0) |
64f1c21e8 blk-mq: make the ... |
359 |
q->poll_nsec = val * 1000; |
29ece8b43 block: add BLK_MQ... |
360 361 |
else return -EINVAL; |
64f1c21e8 blk-mq: make the ... |
362 363 |
return count; |
06426adf0 blk-mq: implement... |
364 |
} |
05229beed block: add block ... |
365 366 367 368 369 370 371 372 373 374 |
static ssize_t queue_poll_show(struct request_queue *q, char *page) { return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); } static ssize_t queue_poll_store(struct request_queue *q, const char *page, size_t count) { unsigned long poll_on; ssize_t ret; |
cd19181bf blk-mq: enable IO... |
375 376 |
if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL || !q->tag_set->map[HCTX_TYPE_POLL].nr_queues) |
05229beed block: add block ... |
377 378 379 380 381 |
return -EINVAL; ret = queue_var_store(&poll_on, page, count); if (ret < 0) return ret; |
05229beed block: add block ... |
382 |
if (poll_on) |
8814ce8a0 block: Introduce ... |
383 |
blk_queue_flag_set(QUEUE_FLAG_POLL, q); |
05229beed block: add block ... |
384 |
else |
8814ce8a0 block: Introduce ... |
385 |
blk_queue_flag_clear(QUEUE_FLAG_POLL, q); |
05229beed block: add block ... |
386 387 388 |
return ret; } |
65cd1d13b block: add io tim... |
389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 |
static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) { return sprintf(page, "%u ", jiffies_to_msecs(q->rq_timeout)); } static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, size_t count) { unsigned int val; int err; err = kstrtou32(page, 10, &val); if (err || val == 0) return -EINVAL; blk_queue_rq_timeout(q, msecs_to_jiffies(val)); return count; } |
87760e5ee block: hook up wr... |
409 410 |
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) { |
a79050434 blk-rq-qos: refac... |
411 |
if (!wbt_rq_qos(q)) |
87760e5ee block: hook up wr... |
412 |
return -EINVAL; |
a79050434 blk-rq-qos: refac... |
413 414 |
return sprintf(page, "%llu ", div_u64(wbt_get_min_lat(q), 1000)); |
87760e5ee block: hook up wr... |
415 416 417 418 419 |
} static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, size_t count) { |
a79050434 blk-rq-qos: refac... |
420 |
struct rq_qos *rqos; |
87760e5ee block: hook up wr... |
421 |
ssize_t ret; |
80e091d10 blk-wbt: allow re... |
422 |
s64 val; |
87760e5ee block: hook up wr... |
423 |
|
87760e5ee block: hook up wr... |
424 425 426 |
ret = queue_var_store64(&val, page); if (ret < 0) return ret; |
d62118b6d blk-wbt: allow wb... |
427 428 |
if (val < -1) return -EINVAL; |
a79050434 blk-rq-qos: refac... |
429 430 |
rqos = wbt_rq_qos(q); if (!rqos) { |
d62118b6d blk-wbt: allow wb... |
431 432 433 |
ret = wbt_init(q); if (ret) return ret; |
d62118b6d blk-wbt: allow wb... |
434 |
} |
87760e5ee block: hook up wr... |
435 |
|
80e091d10 blk-wbt: allow re... |
436 |
if (val == -1) |
a79050434 blk-rq-qos: refac... |
437 |
val = wbt_default_latency_nsec(q); |
80e091d10 blk-wbt: allow re... |
438 |
else if (val >= 0) |
a79050434 blk-rq-qos: refac... |
439 |
val *= 1000ULL; |
d62118b6d blk-wbt: allow wb... |
440 |
|
b7143fe67 block: avoid sett... |
441 442 |
if (wbt_get_min_lat(q) == val) return count; |
c125311d9 blk-wbt: don't ma... |
443 444 445 446 447 |
/* * Ensure that the queue is idled, in case the latency update * ends up either enabling or disabling wbt completely. We can't * have IO inflight if that happens. */ |
a1ce35fa4 block: remove dea... |
448 449 |
blk_mq_freeze_queue(q); blk_mq_quiesce_queue(q); |
80e091d10 blk-wbt: allow re... |
450 |
|
c125311d9 blk-wbt: don't ma... |
451 |
wbt_set_min_lat(q, val); |
c125311d9 blk-wbt: don't ma... |
452 |
|
a1ce35fa4 block: remove dea... |
453 454 |
blk_mq_unquiesce_queue(q); blk_mq_unfreeze_queue(q); |
c125311d9 blk-wbt: don't ma... |
455 |
|
87760e5ee block: hook up wr... |
456 457 |
return count; } |
93e9d8e83 block: add abilit... |
458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 |
static ssize_t queue_wc_show(struct request_queue *q, char *page) { if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) return sprintf(page, "write back "); return sprintf(page, "write through "); } static ssize_t queue_wc_store(struct request_queue *q, const char *page, size_t count) { int set = -1; if (!strncmp(page, "write back", 10)) set = 1; else if (!strncmp(page, "write through", 13) || !strncmp(page, "none", 4)) set = 0; if (set == -1) return -EINVAL; |
93e9d8e83 block: add abilit... |
481 |
if (set) |
8814ce8a0 block: Introduce ... |
482 |
blk_queue_flag_set(QUEUE_FLAG_WC, q); |
93e9d8e83 block: add abilit... |
483 |
else |
8814ce8a0 block: Introduce ... |
484 |
blk_queue_flag_clear(QUEUE_FLAG_WC, q); |
93e9d8e83 block: add abilit... |
485 486 487 |
return count; } |
6fcefbe57 block: Add sysfs ... |
488 489 490 491 492 |
static ssize_t queue_fua_show(struct request_queue *q, char *page) { return sprintf(page, "%u ", test_bit(QUEUE_FLAG_FUA, &q->queue_flags)); } |
ea6ca600e block: expose QUE... |
493 494 495 496 |
static ssize_t queue_dax_show(struct request_queue *q, char *page) { return queue_var_show(blk_queue_dax(q), page); } |
8324aa91d block: split tag ... |
497 |
static struct queue_sysfs_entry queue_requests_entry = { |
5657a819a block drivers/blo... |
498 |
.attr = {.name = "nr_requests", .mode = 0644 }, |
8324aa91d block: split tag ... |
499 500 501 502 503 |
.show = queue_requests_show, .store = queue_requests_store, }; static struct queue_sysfs_entry queue_ra_entry = { |
5657a819a block drivers/blo... |
504 |
.attr = {.name = "read_ahead_kb", .mode = 0644 }, |
8324aa91d block: split tag ... |
505 506 507 508 509 |
.show = queue_ra_show, .store = queue_ra_store, }; static struct queue_sysfs_entry queue_max_sectors_entry = { |
5657a819a block drivers/blo... |
510 |
.attr = {.name = "max_sectors_kb", .mode = 0644 }, |
8324aa91d block: split tag ... |
511 512 513 514 515 |
.show = queue_max_sectors_show, .store = queue_max_sectors_store, }; static struct queue_sysfs_entry queue_max_hw_sectors_entry = { |
5657a819a block drivers/blo... |
516 |
.attr = {.name = "max_hw_sectors_kb", .mode = 0444 }, |
8324aa91d block: split tag ... |
517 518 |
.show = queue_max_hw_sectors_show, }; |
c77a5710b block: Export max... |
519 |
static struct queue_sysfs_entry queue_max_segments_entry = { |
5657a819a block drivers/blo... |
520 |
.attr = {.name = "max_segments", .mode = 0444 }, |
c77a5710b block: Export max... |
521 522 |
.show = queue_max_segments_show, }; |
1e739730c block: optionally... |
523 |
static struct queue_sysfs_entry queue_max_discard_segments_entry = { |
5657a819a block drivers/blo... |
524 |
.attr = {.name = "max_discard_segments", .mode = 0444 }, |
1e739730c block: optionally... |
525 526 |
.show = queue_max_discard_segments_show, }; |
13f05c8d8 block/scsi: Provi... |
527 |
static struct queue_sysfs_entry queue_max_integrity_segments_entry = { |
5657a819a block drivers/blo... |
528 |
.attr = {.name = "max_integrity_segments", .mode = 0444 }, |
13f05c8d8 block/scsi: Provi... |
529 530 |
.show = queue_max_integrity_segments_show, }; |
c77a5710b block: Export max... |
531 |
static struct queue_sysfs_entry queue_max_segment_size_entry = { |
5657a819a block drivers/blo... |
532 |
.attr = {.name = "max_segment_size", .mode = 0444 }, |
c77a5710b block: Export max... |
533 534 |
.show = queue_max_segment_size_show, }; |
8324aa91d block: split tag ... |
535 |
static struct queue_sysfs_entry queue_iosched_entry = { |
5657a819a block drivers/blo... |
536 |
.attr = {.name = "scheduler", .mode = 0644 }, |
8324aa91d block: split tag ... |
537 538 539 |
.show = elv_iosched_show, .store = elv_iosched_store, }; |
e68b903c6 Expose hardware s... |
540 |
static struct queue_sysfs_entry queue_hw_sector_size_entry = { |
5657a819a block drivers/blo... |
541 |
.attr = {.name = "hw_sector_size", .mode = 0444 }, |
e1defc4ff block: Do away wi... |
542 543 544 545 |
.show = queue_logical_block_size_show, }; static struct queue_sysfs_entry queue_logical_block_size_entry = { |
5657a819a block drivers/blo... |
546 |
.attr = {.name = "logical_block_size", .mode = 0444 }, |
e1defc4ff block: Do away wi... |
547 |
.show = queue_logical_block_size_show, |
e68b903c6 Expose hardware s... |
548 |
}; |
c72758f33 block: Export I/O... |
549 |
static struct queue_sysfs_entry queue_physical_block_size_entry = { |
5657a819a block drivers/blo... |
550 |
.attr = {.name = "physical_block_size", .mode = 0444 }, |
c72758f33 block: Export I/O... |
551 552 |
.show = queue_physical_block_size_show, }; |
87caf97cf blk-sysfs: Add 'c... |
553 |
static struct queue_sysfs_entry queue_chunk_sectors_entry = { |
5657a819a block drivers/blo... |
554 |
.attr = {.name = "chunk_sectors", .mode = 0444 }, |
87caf97cf blk-sysfs: Add 'c... |
555 556 |
.show = queue_chunk_sectors_show, }; |
c72758f33 block: Export I/O... |
557 |
static struct queue_sysfs_entry queue_io_min_entry = { |
5657a819a block drivers/blo... |
558 |
.attr = {.name = "minimum_io_size", .mode = 0444 }, |
c72758f33 block: Export I/O... |
559 560 561 562 |
.show = queue_io_min_show, }; static struct queue_sysfs_entry queue_io_opt_entry = { |
5657a819a block drivers/blo... |
563 |
.attr = {.name = "optimal_io_size", .mode = 0444 }, |
c72758f33 block: Export I/O... |
564 |
.show = queue_io_opt_show, |
e68b903c6 Expose hardware s... |
565 |
}; |
86b372814 block: Expose dis... |
566 |
static struct queue_sysfs_entry queue_discard_granularity_entry = { |
5657a819a block drivers/blo... |
567 |
.attr = {.name = "discard_granularity", .mode = 0444 }, |
86b372814 block: Expose dis... |
568 569 |
.show = queue_discard_granularity_show, }; |
0034af036 block: make /sys/... |
570 |
static struct queue_sysfs_entry queue_discard_max_hw_entry = { |
5657a819a block drivers/blo... |
571 |
.attr = {.name = "discard_max_hw_bytes", .mode = 0444 }, |
0034af036 block: make /sys/... |
572 573 |
.show = queue_discard_max_hw_show, }; |
86b372814 block: Expose dis... |
574 |
static struct queue_sysfs_entry queue_discard_max_entry = { |
5657a819a block drivers/blo... |
575 |
.attr = {.name = "discard_max_bytes", .mode = 0644 }, |
86b372814 block: Expose dis... |
576 |
.show = queue_discard_max_show, |
0034af036 block: make /sys/... |
577 |
.store = queue_discard_max_store, |
86b372814 block: Expose dis... |
578 |
}; |
98262f276 block: Allow devi... |
579 |
static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { |
5657a819a block drivers/blo... |
580 |
.attr = {.name = "discard_zeroes_data", .mode = 0444 }, |
98262f276 block: Allow devi... |
581 582 |
.show = queue_discard_zeroes_data_show, }; |
4363ac7c1 block: Implement ... |
583 |
static struct queue_sysfs_entry queue_write_same_max_entry = { |
5657a819a block drivers/blo... |
584 |
.attr = {.name = "write_same_max_bytes", .mode = 0444 }, |
4363ac7c1 block: Implement ... |
585 586 |
.show = queue_write_same_max_show, }; |
a6f0788ec block: add suppor... |
587 |
static struct queue_sysfs_entry queue_write_zeroes_max_entry = { |
5657a819a block drivers/blo... |
588 |
.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 }, |
a6f0788ec block: add suppor... |
589 590 |
.show = queue_write_zeroes_max_show, }; |
1308835ff block: export SSD... |
591 |
static struct queue_sysfs_entry queue_nonrot_entry = { |
5657a819a block drivers/blo... |
592 |
.attr = {.name = "rotational", .mode = 0644 }, |
956bcb7c1 block: add helper... |
593 594 |
.show = queue_show_nonrot, .store = queue_store_nonrot, |
1308835ff block: export SSD... |
595 |
}; |
797476b88 block: Add 'zoned... |
596 |
static struct queue_sysfs_entry queue_zoned_entry = { |
5657a819a block drivers/blo... |
597 |
.attr = {.name = "zoned", .mode = 0444 }, |
797476b88 block: Add 'zoned... |
598 599 |
.show = queue_zoned_show, }; |
965b652e9 block: Expose que... |
600 601 602 603 |
static struct queue_sysfs_entry queue_nr_zones_entry = { .attr = {.name = "nr_zones", .mode = 0444 }, .show = queue_nr_zones_show, }; |
ac9fafa12 block: Skip I/O m... |
604 |
static struct queue_sysfs_entry queue_nomerges_entry = { |
5657a819a block drivers/blo... |
605 |
.attr = {.name = "nomerges", .mode = 0644 }, |
ac9fafa12 block: Skip I/O m... |
606 607 608 |
.show = queue_nomerges_show, .store = queue_nomerges_store, }; |
c7c22e4d5 block: add suppor... |
609 |
static struct queue_sysfs_entry queue_rq_affinity_entry = { |
5657a819a block drivers/blo... |
610 |
.attr = {.name = "rq_affinity", .mode = 0644 }, |
c7c22e4d5 block: add suppor... |
611 612 613 |
.show = queue_rq_affinity_show, .store = queue_rq_affinity_store, }; |
bc58ba946 block: add sysfs ... |
614 |
static struct queue_sysfs_entry queue_iostats_entry = { |
5657a819a block drivers/blo... |
615 |
.attr = {.name = "iostats", .mode = 0644 }, |
956bcb7c1 block: add helper... |
616 617 |
.show = queue_show_iostats, .store = queue_store_iostats, |
bc58ba946 block: add sysfs ... |
618 |
}; |
e2e1a148b block: add sysfs ... |
619 |
static struct queue_sysfs_entry queue_random_entry = { |
5657a819a block drivers/blo... |
620 |
.attr = {.name = "add_random", .mode = 0644 }, |
956bcb7c1 block: add helper... |
621 622 |
.show = queue_show_random, .store = queue_store_random, |
e2e1a148b block: add sysfs ... |
623 |
}; |
05229beed block: add block ... |
624 |
static struct queue_sysfs_entry queue_poll_entry = { |
5657a819a block drivers/blo... |
625 |
.attr = {.name = "io_poll", .mode = 0644 }, |
05229beed block: add block ... |
626 627 628 |
.show = queue_poll_show, .store = queue_poll_store, }; |
06426adf0 blk-mq: implement... |
629 |
static struct queue_sysfs_entry queue_poll_delay_entry = { |
5657a819a block drivers/blo... |
630 |
.attr = {.name = "io_poll_delay", .mode = 0644 }, |
06426adf0 blk-mq: implement... |
631 632 633 |
.show = queue_poll_delay_show, .store = queue_poll_delay_store, }; |
93e9d8e83 block: add abilit... |
634 |
static struct queue_sysfs_entry queue_wc_entry = { |
5657a819a block drivers/blo... |
635 |
.attr = {.name = "write_cache", .mode = 0644 }, |
93e9d8e83 block: add abilit... |
636 637 638 |
.show = queue_wc_show, .store = queue_wc_store, }; |
6fcefbe57 block: Add sysfs ... |
639 |
static struct queue_sysfs_entry queue_fua_entry = { |
5657a819a block drivers/blo... |
640 |
.attr = {.name = "fua", .mode = 0444 }, |
6fcefbe57 block: Add sysfs ... |
641 642 |
.show = queue_fua_show, }; |
ea6ca600e block: expose QUE... |
643 |
static struct queue_sysfs_entry queue_dax_entry = { |
5657a819a block drivers/blo... |
644 |
.attr = {.name = "dax", .mode = 0444 }, |
ea6ca600e block: expose QUE... |
645 646 |
.show = queue_dax_show, }; |
65cd1d13b block: add io tim... |
647 648 649 650 651 |
static struct queue_sysfs_entry queue_io_timeout_entry = { .attr = {.name = "io_timeout", .mode = 0644 }, .show = queue_io_timeout_show, .store = queue_io_timeout_store, }; |
87760e5ee block: hook up wr... |
652 |
static struct queue_sysfs_entry queue_wb_lat_entry = { |
5657a819a block drivers/blo... |
653 |
.attr = {.name = "wbt_lat_usec", .mode = 0644 }, |
87760e5ee block: hook up wr... |
654 655 656 |
.show = queue_wb_lat_show, .store = queue_wb_lat_store, }; |
297e3d854 blk-throttle: mak... |
657 658 |
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW static struct queue_sysfs_entry throtl_sample_time_entry = { |
5657a819a block drivers/blo... |
659 |
.attr = {.name = "throttle_sample_time", .mode = 0644 }, |
297e3d854 blk-throttle: mak... |
660 661 662 663 |
.show = blk_throtl_sample_time_show, .store = blk_throtl_sample_time_store, }; #endif |
/*
 * All attributes published under /sys/block/<dev>/queue/. Registered as a
 * group (see queue_attr_group below) so per-attribute visibility can be
 * decided at registration time by queue_attr_visible().
 */
static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,				/* sysfs group terminator */
};
4d25339e3 block: don't show... |
705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 |
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); if (attr == &queue_io_timeout_entry.attr && (!q->mq_ops || !q->mq_ops->timeout)) return 0; return attr->mode; } static struct attribute_group queue_attr_group = { .attrs = queue_attrs, .is_visible = queue_attr_visible, }; |
8324aa91d block: split tag ... |
722 723 724 725 726 727 728 729 730 731 732 733 734 |
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) static ssize_t queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct queue_sysfs_entry *entry = to_queue(attr); struct request_queue *q = container_of(kobj, struct request_queue, kobj); ssize_t res; if (!entry->show) return -EIO; mutex_lock(&q->sysfs_lock); |
3f3299d5c block: Rename que... |
735 |
if (blk_queue_dying(q)) { |
8324aa91d block: split tag ... |
736 737 738 739 740 741 742 743 744 745 746 747 748 |
mutex_unlock(&q->sysfs_lock); return -ENOENT; } res = entry->show(q, page); mutex_unlock(&q->sysfs_lock); return res; } static ssize_t queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct queue_sysfs_entry *entry = to_queue(attr); |
6728cb0e6 block: make core ... |
749 |
struct request_queue *q; |
8324aa91d block: split tag ... |
750 751 752 753 |
ssize_t res; if (!entry->store) return -EIO; |
6728cb0e6 block: make core ... |
754 755 |
q = container_of(kobj, struct request_queue, kobj); |
8324aa91d block: split tag ... |
756 |
mutex_lock(&q->sysfs_lock); |
3f3299d5c block: Rename que... |
757 |
if (blk_queue_dying(q)) { |
8324aa91d block: split tag ... |
758 759 760 761 762 763 764 |
mutex_unlock(&q->sysfs_lock); return -ENOENT; } res = entry->store(q, page, length); mutex_unlock(&q->sysfs_lock); return res; } |
548bc8e1b block: RCU free r... |
765 766 767 768 769 770 |
static void blk_free_queue_rcu(struct rcu_head *rcu_head) { struct request_queue *q = container_of(rcu_head, struct request_queue, rcu_head); kmem_cache_free(blk_requestq_cachep, q); } |
/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
		q->elevator = NULL;
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}
/**
 * __blk_release_queue - release a request queue
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     This function is called when a block device is being unregistered. The
 *     process of releasing a request queue starts with blk_cleanup_queue, which
 *     set the appropriate flags and then calls blk_put_queue, that decrements
 *     the reference counter of the request queue. Once the reference counter
 *     of the request queue reaches zero, blk_release_queue is called to release
 *     all allocated resources of the request queue.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	/* Detach the poll-stats callback (if installed) before freeing it. */
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	/* Flush pending requeue work before mq resources go away below. */
	if (queue_is_mq(q))
		cancel_delayed_work_sync(&q->requeue_work);

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	/* Defer the final kmem_cache_free until after an RCU grace period. */
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
dc9edc44d block: Fix a blk_... |
835 836 837 838 839 840 841 842 |
static void blk_release_queue(struct kobject *kobj) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); INIT_WORK(&q->release_work, __blk_release_queue); schedule_work(&q->release_work); } |
/* show/store dispatchers for every queue sysfs attribute. */
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

/* kobj_type backing the "queue" directory; release defers to a workqueue. */
struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	bool has_elevator = false;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measureable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* sysfs_dir_lock guards creation/removal of the sysfs directories. */
	mutex_lock(&q->sysfs_dir_lock);

	/* Holds a reference on dev->kobj; dropped on error or unregister. */
	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		/* Undo blk_trace_init_sysfs() and kobject_add() above. */
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	/* sysfs_lock serializes against show/store and elevator switching. */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		/* false: KOBJ_ADD uevent is sent separately below. */
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
		has_elevator = true;
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (has_elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
8324aa91d block: split tag ... |
936 |
|
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	/* Elevator teardown needs sysfs_lock, taken inside sysfs_dir_lock. */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/* Drop the dev->kobj reference taken by blk_register_queue(). */
	kobject_put(&disk_to_dev(disk)->kobj);
}