Blame view

block/blk-sysfs.c 24.4 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  // SPDX-License-Identifier: GPL-2.0
8324aa91d   Jens Axboe   block: split tag ...
2
3
4
5
  /*
   * Functions related to sysfs handling
   */
  #include <linux/kernel.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
6
  #include <linux/slab.h>
8324aa91d   Jens Axboe   block: split tag ...
7
8
9
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
66114cad6   Tejun Heo   writeback: separa...
10
  #include <linux/backing-dev.h>
8324aa91d   Jens Axboe   block: split tag ...
11
  #include <linux/blktrace_api.h>
320ae51fe   Jens Axboe   blk-mq: new multi...
12
  #include <linux/blk-mq.h>
eea8f41cc   Tejun Heo   blkcg: move block...
13
  #include <linux/blk-cgroup.h>
8324aa91d   Jens Axboe   block: split tag ...
14
15
  
  #include "blk.h"
3edcc0ce8   Ming Lei   block: blk-mq: do...
16
  #include "blk-mq.h"
d173a2516   Omar Sandoval   blk-mq: move debu...
17
  #include "blk-mq-debugfs.h"
87760e5ee   Jens Axboe   block: hook up wr...
18
  #include "blk-wbt.h"
8324aa91d   Jens Axboe   block: split tag ...
19
20
21
22
23
24
25
26
  
/*
 * One sysfs attribute of a request_queue: pairs the generic kobject
 * attribute with queue-aware show/store callbacks (either may be NULL).
 */
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
  
  static ssize_t
9cb308ce8   Xiaotian Feng   block: sysfs fix ...
27
  queue_var_show(unsigned long var, char *page)
8324aa91d   Jens Axboe   block: split tag ...
28
  {
9cb308ce8   Xiaotian Feng   block: sysfs fix ...
29
30
  	return sprintf(page, "%lu
  ", var);
8324aa91d   Jens Axboe   block: split tag ...
31
32
33
34
35
  }
  
  static ssize_t
  queue_var_store(unsigned long *var, const char *page, size_t count)
  {
b1f3b64d7   Dave Reisner   block: reject inv...
36
37
  	int err;
  	unsigned long v;
ed751e683   Jingoo Han   block/blk-sysfs.c...
38
  	err = kstrtoul(page, 10, &v);
b1f3b64d7   Dave Reisner   block: reject inv...
39
40
41
42
  	if (err || v > UINT_MAX)
  		return -EINVAL;
  
  	*var = v;
8324aa91d   Jens Axboe   block: split tag ...
43

8324aa91d   Jens Axboe   block: split tag ...
44
45
  	return count;
  }
80e091d10   Jens Axboe   blk-wbt: allow re...
46
  static ssize_t queue_var_store64(s64 *var, const char *page)
87760e5ee   Jens Axboe   block: hook up wr...
47
48
  {
  	int err;
80e091d10   Jens Axboe   blk-wbt: allow re...
49
  	s64 v;
87760e5ee   Jens Axboe   block: hook up wr...
50

80e091d10   Jens Axboe   blk-wbt: allow re...
51
  	err = kstrtos64(page, 10, &v);
87760e5ee   Jens Axboe   block: hook up wr...
52
53
54
55
56
57
  	if (err < 0)
  		return err;
  
  	*var = v;
  	return 0;
  }
8324aa91d   Jens Axboe   block: split tag ...
58
59
60
61
62
63
64
65
  static ssize_t queue_requests_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->nr_requests, (page));
  }
  
  static ssize_t
  queue_requests_store(struct request_queue *q, const char *page, size_t count)
  {
8324aa91d   Jens Axboe   block: split tag ...
66
  	unsigned long nr;
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
67
  	int ret, err;
b8a9ae779   Jens Axboe   block: don't assu...
68

e3a2b3f93   Jens Axboe   blk-mq: allow cha...
69
  	if (!q->request_fn && !q->mq_ops)
b8a9ae779   Jens Axboe   block: don't assu...
70
71
72
  		return -EINVAL;
  
  	ret = queue_var_store(&nr, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
73
74
  	if (ret < 0)
  		return ret;
8324aa91d   Jens Axboe   block: split tag ...
75
76
  	if (nr < BLKDEV_MIN_RQ)
  		nr = BLKDEV_MIN_RQ;
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
77
78
79
80
81
82
83
  	if (q->request_fn)
  		err = blk_update_nr_requests(q, nr);
  	else
  		err = blk_mq_update_nr_requests(q, nr);
  
  	if (err)
  		return err;
8324aa91d   Jens Axboe   block: split tag ...
84
85
86
87
88
  	return ret;
  }
  
  static ssize_t queue_ra_show(struct request_queue *q, char *page)
  {
dc3b17cc8   Jan Kara   block: Use pointe...
89
  	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
90
  					(PAGE_SHIFT - 10);
8324aa91d   Jens Axboe   block: split tag ...
91
92
93
94
95
96
97
98
99
  
  	return queue_var_show(ra_kb, (page));
  }
  
  static ssize_t
  queue_ra_store(struct request_queue *q, const char *page, size_t count)
  {
  	unsigned long ra_kb;
  	ssize_t ret = queue_var_store(&ra_kb, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
100
101
  	if (ret < 0)
  		return ret;
dc3b17cc8   Jan Kara   block: Use pointe...
102
  	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
8324aa91d   Jens Axboe   block: split tag ...
103
104
105
106
107
108
  
  	return ret;
  }
  
  static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
  {
ae03bf639   Martin K. Petersen   block: Use access...
109
  	int max_sectors_kb = queue_max_sectors(q) >> 1;
8324aa91d   Jens Axboe   block: split tag ...
110
111
112
  
  	return queue_var_show(max_sectors_kb, (page));
  }
c77a5710b   Martin K. Petersen   block: Export max...
113
114
115
116
  static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_max_segments(q), (page));
  }
1e739730c   Christoph Hellwig   block: optionally...
117
118
119
120
121
  static ssize_t queue_max_discard_segments_show(struct request_queue *q,
  		char *page)
  {
  	return queue_var_show(queue_max_discard_segments(q), (page));
  }
13f05c8d8   Martin K. Petersen   block/scsi: Provi...
122
123
124
125
  static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->limits.max_integrity_segments, (page));
  }
c77a5710b   Martin K. Petersen   block: Export max...
126
127
  static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
  {
e692cb668   Martin K. Petersen   block: Deprecate ...
128
  	if (blk_queue_cluster(q))
c77a5710b   Martin K. Petersen   block: Export max...
129
  		return queue_var_show(queue_max_segment_size(q), (page));
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
130
  	return queue_var_show(PAGE_SIZE, (page));
c77a5710b   Martin K. Petersen   block: Export max...
131
  }
e1defc4ff   Martin K. Petersen   block: Do away wi...
132
  static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
e68b903c6   Martin K. Petersen   Expose hardware s...
133
  {
e1defc4ff   Martin K. Petersen   block: Do away wi...
134
  	return queue_var_show(queue_logical_block_size(q), page);
e68b903c6   Martin K. Petersen   Expose hardware s...
135
  }
c72758f33   Martin K. Petersen   block: Export I/O...
136
137
138
139
  static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_physical_block_size(q), page);
  }
87caf97cf   Hannes Reinecke   blk-sysfs: Add 'c...
140
141
142
143
  static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->limits.chunk_sectors, page);
  }
c72758f33   Martin K. Petersen   block: Export I/O...
144
145
146
147
148
149
150
151
  static ssize_t queue_io_min_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_io_min(q), page);
  }
  
  static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_io_opt(q), page);
e68b903c6   Martin K. Petersen   Expose hardware s...
152
  }
86b372814   Martin K. Petersen   block: Expose dis...
153
154
155
156
  static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->limits.discard_granularity, page);
  }
0034af036   Jens Axboe   block: make /sys/...
157
158
  static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
  {
0034af036   Jens Axboe   block: make /sys/...
159

18f922d03   Alan   blk: fix overflow...
160
161
162
  	return sprintf(page, "%llu
  ",
  		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
0034af036   Jens Axboe   block: make /sys/...
163
  }
86b372814   Martin K. Petersen   block: Expose dis...
164
165
  static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
  {
a934a00a6   Martin K. Petersen   block: Fix discar...
166
167
168
  	return sprintf(page, "%llu
  ",
  		       (unsigned long long)q->limits.max_discard_sectors << 9);
86b372814   Martin K. Petersen   block: Expose dis...
169
  }
0034af036   Jens Axboe   block: make /sys/...
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
  static ssize_t queue_discard_max_store(struct request_queue *q,
  				       const char *page, size_t count)
  {
  	unsigned long max_discard;
  	ssize_t ret = queue_var_store(&max_discard, page, count);
  
  	if (ret < 0)
  		return ret;
  
  	if (max_discard & (q->limits.discard_granularity - 1))
  		return -EINVAL;
  
  	max_discard >>= 9;
  	if (max_discard > UINT_MAX)
  		return -EINVAL;
  
  	if (max_discard > q->limits.max_hw_discard_sectors)
  		max_discard = q->limits.max_hw_discard_sectors;
  
  	q->limits.max_discard_sectors = max_discard;
  	return ret;
  }
98262f276   Martin K. Petersen   block: Allow devi...
192
193
  static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
  {
48920ff2a   Christoph Hellwig   block: remove the...
194
  	return queue_var_show(0, page);
98262f276   Martin K. Petersen   block: Allow devi...
195
  }
4363ac7c1   Martin K. Petersen   block: Implement ...
196
197
198
199
200
201
  static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
  {
  	return sprintf(page, "%llu
  ",
  		(unsigned long long)q->limits.max_write_same_sectors << 9);
  }
a6f0788ec   Chaitanya Kulkarni   block: add suppor...
202
203
204
205
206
207
  static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
  {
  	return sprintf(page, "%llu
  ",
  		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
  }
4363ac7c1   Martin K. Petersen   block: Implement ...
208

8324aa91d   Jens Axboe   block: split tag ...
209
210
211
212
  static ssize_t
  queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
  {
  	unsigned long max_sectors_kb,
ae03bf639   Martin K. Petersen   block: Use access...
213
  		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
214
  			page_kb = 1 << (PAGE_SHIFT - 10);
8324aa91d   Jens Axboe   block: split tag ...
215
  	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
216
217
  	if (ret < 0)
  		return ret;
ca369d51b   Martin K. Petersen   block/sd: Fix dev...
218
219
  	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
  					 q->limits.max_dev_sectors >> 1);
8324aa91d   Jens Axboe   block: split tag ...
220
221
  	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
  		return -EINVAL;
7c239517d   Wu Fengguang   block: don't take...
222

8324aa91d   Jens Axboe   block: split tag ...
223
  	spin_lock_irq(q->queue_lock);
c295fc057   Nikanth Karthikesan   block: Allow chan...
224
  	q->limits.max_sectors = max_sectors_kb << 1;
dc3b17cc8   Jan Kara   block: Use pointe...
225
  	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
8324aa91d   Jens Axboe   block: split tag ...
226
227
228
229
230
231
232
  	spin_unlock_irq(q->queue_lock);
  
  	return ret;
  }
  
  static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
  {
ae03bf639   Martin K. Petersen   block: Use access...
233
  	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
8324aa91d   Jens Axboe   block: split tag ...
234
235
236
  
  	return queue_var_show(max_hw_sectors_kb, (page));
  }
956bcb7c1   Jens Axboe   block: add helper...
237
238
239
240
241
242
243
244
245
246
247
248
249
250
  #define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
  static ssize_t								\
  queue_show_##name(struct request_queue *q, char *page)			\
  {									\
  	int bit;							\
  	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
  	return queue_var_show(neg ? !bit : bit, page);			\
  }									\
  static ssize_t								\
  queue_store_##name(struct request_queue *q, const char *page, size_t count) \
  {									\
  	unsigned long val;						\
  	ssize_t ret;							\
  	ret = queue_var_store(&val, page, count);			\
c678ef528   Arnd Bergmann   block: avoid usin...
251
252
  	if (ret < 0)							\
  		 return ret;						\
956bcb7c1   Jens Axboe   block: add helper...
253
254
255
256
257
258
259
260
261
262
  	if (neg)							\
  		val = !val;						\
  									\
  	spin_lock_irq(q->queue_lock);					\
  	if (val)							\
  		queue_flag_set(QUEUE_FLAG_##flag, q);			\
  	else								\
  		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
  	spin_unlock_irq(q->queue_lock);					\
  	return ret;							\
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
263
  }
956bcb7c1   Jens Axboe   block: add helper...
264
265
266
267
  QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
  QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
  QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
  #undef QUEUE_SYSFS_BIT_FNS
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
268

797476b88   Damien Le Moal   block: Add 'zoned...
269
270
271
272
273
274
275
276
277
278
279
280
281
282
  static ssize_t queue_zoned_show(struct request_queue *q, char *page)
  {
  	switch (blk_queue_zoned_model(q)) {
  	case BLK_ZONED_HA:
  		return sprintf(page, "host-aware
  ");
  	case BLK_ZONED_HM:
  		return sprintf(page, "host-managed
  ");
  	default:
  		return sprintf(page, "none
  ");
  	}
  }
ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
283
284
  static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
  {
488991e28   Alan D. Brunelle   block: Added in s...
285
286
  	return queue_var_show((blk_queue_nomerges(q) << 1) |
  			       blk_queue_noxmerges(q), page);
ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
287
288
289
290
291
292
293
  }
  
  static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
  				    size_t count)
  {
  	unsigned long nm;
  	ssize_t ret = queue_var_store(&nm, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
294
295
  	if (ret < 0)
  		return ret;
bf0f97025   Jens Axboe   block: sysfs stor...
296
  	spin_lock_irq(q->queue_lock);
488991e28   Alan D. Brunelle   block: Added in s...
297
298
299
  	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
  	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
  	if (nm == 2)
bf0f97025   Jens Axboe   block: sysfs stor...
300
  		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
488991e28   Alan D. Brunelle   block: Added in s...
301
302
  	else if (nm)
  		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
bf0f97025   Jens Axboe   block: sysfs stor...
303
  	spin_unlock_irq(q->queue_lock);
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
304

ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
305
306
  	return ret;
  }
c7c22e4d5   Jens Axboe   block: add suppor...
307
308
  static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
  {
9cb308ce8   Xiaotian Feng   block: sysfs fix ...
309
  	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
5757a6d76   Dan Williams   block: strict rq_...
310
  	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
c7c22e4d5   Jens Axboe   block: add suppor...
311

5757a6d76   Dan Williams   block: strict rq_...
312
  	return queue_var_show(set << force, page);
c7c22e4d5   Jens Axboe   block: add suppor...
313
314
315
316
317
318
  }
  
  static ssize_t
  queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
  {
  	ssize_t ret = -EINVAL;
0a06ff068   Christoph Hellwig   kernel: remove CO...
319
  #ifdef CONFIG_SMP
c7c22e4d5   Jens Axboe   block: add suppor...
320
321
322
  	unsigned long val;
  
  	ret = queue_var_store(&val, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
323
324
  	if (ret < 0)
  		return ret;
c7c22e4d5   Jens Axboe   block: add suppor...
325
  	spin_lock_irq(q->queue_lock);
e8037d498   Eric Seppanen   block: Fix queue_...
326
  	if (val == 2) {
c7c22e4d5   Jens Axboe   block: add suppor...
327
  		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
e8037d498   Eric Seppanen   block: Fix queue_...
328
329
330
331
332
  		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
  	} else if (val == 1) {
  		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
  		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
  	} else if (val == 0) {
5757a6d76   Dan Williams   block: strict rq_...
333
334
335
  		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
  		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
  	}
c7c22e4d5   Jens Axboe   block: add suppor...
336
337
338
339
  	spin_unlock_irq(q->queue_lock);
  #endif
  	return ret;
  }
8324aa91d   Jens Axboe   block: split tag ...
340

06426adf0   Jens Axboe   blk-mq: implement...
341
342
  static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
  {
64f1c21e8   Jens Axboe   blk-mq: make the ...
343
344
345
346
347
348
349
350
351
  	int val;
  
  	if (q->poll_nsec == -1)
  		val = -1;
  	else
  		val = q->poll_nsec / 1000;
  
  	return sprintf(page, "%d
  ", val);
06426adf0   Jens Axboe   blk-mq: implement...
352
353
354
355
356
  }
  
  static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
  				size_t count)
  {
64f1c21e8   Jens Axboe   blk-mq: make the ...
357
  	int err, val;
06426adf0   Jens Axboe   blk-mq: implement...
358
359
360
  
  	if (!q->mq_ops || !q->mq_ops->poll)
  		return -EINVAL;
64f1c21e8   Jens Axboe   blk-mq: make the ...
361
362
363
  	err = kstrtoint(page, 10, &val);
  	if (err < 0)
  		return err;
06426adf0   Jens Axboe   blk-mq: implement...
364

64f1c21e8   Jens Axboe   blk-mq: make the ...
365
366
367
368
369
370
  	if (val == -1)
  		q->poll_nsec = -1;
  	else
  		q->poll_nsec = val * 1000;
  
  	return count;
06426adf0   Jens Axboe   blk-mq: implement...
371
  }
05229beed   Jens Axboe   block: add block ...
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
  static ssize_t queue_poll_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
  }
  
  static ssize_t queue_poll_store(struct request_queue *q, const char *page,
  				size_t count)
  {
  	unsigned long poll_on;
  	ssize_t ret;
  
  	if (!q->mq_ops || !q->mq_ops->poll)
  		return -EINVAL;
  
  	ret = queue_var_store(&poll_on, page, count);
  	if (ret < 0)
  		return ret;
  
  	spin_lock_irq(q->queue_lock);
  	if (poll_on)
  		queue_flag_set(QUEUE_FLAG_POLL, q);
  	else
  		queue_flag_clear(QUEUE_FLAG_POLL, q);
  	spin_unlock_irq(q->queue_lock);
  
  	return ret;
  }
87760e5ee   Jens Axboe   block: hook up wr...
399
400
401
402
403
404
405
406
407
408
409
410
  static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
  {
  	if (!q->rq_wb)
  		return -EINVAL;
  
  	return sprintf(page, "%llu
  ", div_u64(q->rq_wb->min_lat_nsec, 1000));
  }
  
  static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
  				  size_t count)
  {
80e091d10   Jens Axboe   blk-wbt: allow re...
411
  	struct rq_wb *rwb;
87760e5ee   Jens Axboe   block: hook up wr...
412
  	ssize_t ret;
80e091d10   Jens Axboe   blk-wbt: allow re...
413
  	s64 val;
87760e5ee   Jens Axboe   block: hook up wr...
414

87760e5ee   Jens Axboe   block: hook up wr...
415
416
417
  	ret = queue_var_store64(&val, page);
  	if (ret < 0)
  		return ret;
d62118b6d   Jens Axboe   blk-wbt: allow wb...
418
419
420
421
422
423
424
425
426
427
428
429
430
  	if (val < -1)
  		return -EINVAL;
  
  	rwb = q->rq_wb;
  	if (!rwb) {
  		ret = wbt_init(q);
  		if (ret)
  			return ret;
  
  		rwb = q->rq_wb;
  		if (!rwb)
  			return -EINVAL;
  	}
87760e5ee   Jens Axboe   block: hook up wr...
431

80e091d10   Jens Axboe   blk-wbt: allow re...
432
433
434
435
  	if (val == -1)
  		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
  	else if (val >= 0)
  		rwb->min_lat_nsec = val * 1000ULL;
d62118b6d   Jens Axboe   blk-wbt: allow wb...
436
437
438
  
  	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
  		rwb->enable_state = WBT_STATE_ON_MANUAL;
80e091d10   Jens Axboe   blk-wbt: allow re...
439
440
  
  	wbt_update_limits(rwb);
87760e5ee   Jens Axboe   block: hook up wr...
441
442
  	return count;
  }
93e9d8e83   Jens Axboe   block: add abilit...
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
  static ssize_t queue_wc_show(struct request_queue *q, char *page)
  {
  	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
  		return sprintf(page, "write back
  ");
  
  	return sprintf(page, "write through
  ");
  }
  
  static ssize_t queue_wc_store(struct request_queue *q, const char *page,
  			      size_t count)
  {
  	int set = -1;
  
  	if (!strncmp(page, "write back", 10))
  		set = 1;
  	else if (!strncmp(page, "write through", 13) ||
  		 !strncmp(page, "none", 4))
  		set = 0;
  
  	if (set == -1)
  		return -EINVAL;
  
  	spin_lock_irq(q->queue_lock);
  	if (set)
  		queue_flag_set(QUEUE_FLAG_WC, q);
  	else
  		queue_flag_clear(QUEUE_FLAG_WC, q);
  	spin_unlock_irq(q->queue_lock);
  
  	return count;
  }
ea6ca600e   Yigal Korman   block: expose QUE...
476
477
478
479
  static ssize_t queue_dax_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(blk_queue_dax(q), page);
  }
8324aa91d   Jens Axboe   block: split tag ...
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
  static struct queue_sysfs_entry queue_requests_entry = {
  	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_requests_show,
  	.store = queue_requests_store,
  };
  
  static struct queue_sysfs_entry queue_ra_entry = {
  	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_ra_show,
  	.store = queue_ra_store,
  };
  
  static struct queue_sysfs_entry queue_max_sectors_entry = {
  	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_max_sectors_show,
  	.store = queue_max_sectors_store,
  };
  
  static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
  	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
  	.show = queue_max_hw_sectors_show,
  };
c77a5710b   Martin K. Petersen   block: Export max...
502
503
504
505
  static struct queue_sysfs_entry queue_max_segments_entry = {
  	.attr = {.name = "max_segments", .mode = S_IRUGO },
  	.show = queue_max_segments_show,
  };
1e739730c   Christoph Hellwig   block: optionally...
506
507
508
509
  static struct queue_sysfs_entry queue_max_discard_segments_entry = {
  	.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
  	.show = queue_max_discard_segments_show,
  };
13f05c8d8   Martin K. Petersen   block/scsi: Provi...
510
511
512
513
  static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
  	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
  	.show = queue_max_integrity_segments_show,
  };
c77a5710b   Martin K. Petersen   block: Export max...
514
515
516
517
  static struct queue_sysfs_entry queue_max_segment_size_entry = {
  	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
  	.show = queue_max_segment_size_show,
  };
8324aa91d   Jens Axboe   block: split tag ...
518
519
520
521
522
  static struct queue_sysfs_entry queue_iosched_entry = {
  	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
  	.show = elv_iosched_show,
  	.store = elv_iosched_store,
  };
e68b903c6   Martin K. Petersen   Expose hardware s...
523
524
  static struct queue_sysfs_entry queue_hw_sector_size_entry = {
  	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
e1defc4ff   Martin K. Petersen   block: Do away wi...
525
526
527
528
529
530
  	.show = queue_logical_block_size_show,
  };
  
  static struct queue_sysfs_entry queue_logical_block_size_entry = {
  	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
  	.show = queue_logical_block_size_show,
e68b903c6   Martin K. Petersen   Expose hardware s...
531
  };
c72758f33   Martin K. Petersen   block: Export I/O...
532
533
534
535
  static struct queue_sysfs_entry queue_physical_block_size_entry = {
  	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
  	.show = queue_physical_block_size_show,
  };
87caf97cf   Hannes Reinecke   blk-sysfs: Add 'c...
536
537
538
539
  static struct queue_sysfs_entry queue_chunk_sectors_entry = {
  	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
  	.show = queue_chunk_sectors_show,
  };
c72758f33   Martin K. Petersen   block: Export I/O...
540
541
542
543
544
545
546
547
  static struct queue_sysfs_entry queue_io_min_entry = {
  	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
  	.show = queue_io_min_show,
  };
  
  static struct queue_sysfs_entry queue_io_opt_entry = {
  	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
  	.show = queue_io_opt_show,
e68b903c6   Martin K. Petersen   Expose hardware s...
548
  };
86b372814   Martin K. Petersen   block: Expose dis...
549
550
551
552
  static struct queue_sysfs_entry queue_discard_granularity_entry = {
  	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
  	.show = queue_discard_granularity_show,
  };
0034af036   Jens Axboe   block: make /sys/...
553
554
555
556
  static struct queue_sysfs_entry queue_discard_max_hw_entry = {
  	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
  	.show = queue_discard_max_hw_show,
  };
86b372814   Martin K. Petersen   block: Expose dis...
557
  static struct queue_sysfs_entry queue_discard_max_entry = {
0034af036   Jens Axboe   block: make /sys/...
558
  	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
86b372814   Martin K. Petersen   block: Expose dis...
559
  	.show = queue_discard_max_show,
0034af036   Jens Axboe   block: make /sys/...
560
  	.store = queue_discard_max_store,
86b372814   Martin K. Petersen   block: Expose dis...
561
  };
98262f276   Martin K. Petersen   block: Allow devi...
562
563
564
565
  static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
  	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
  	.show = queue_discard_zeroes_data_show,
  };
4363ac7c1   Martin K. Petersen   block: Implement ...
566
567
568
569
  static struct queue_sysfs_entry queue_write_same_max_entry = {
  	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
  	.show = queue_write_same_max_show,
  };
a6f0788ec   Chaitanya Kulkarni   block: add suppor...
570
571
572
573
  static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
  	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
  	.show = queue_write_zeroes_max_show,
  };
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
574
575
  static struct queue_sysfs_entry queue_nonrot_entry = {
  	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
956bcb7c1   Jens Axboe   block: add helper...
576
577
  	.show = queue_show_nonrot,
  	.store = queue_store_nonrot,
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
578
  };
797476b88   Damien Le Moal   block: Add 'zoned...
579
580
581
582
  static struct queue_sysfs_entry queue_zoned_entry = {
  	.attr = {.name = "zoned", .mode = S_IRUGO },
  	.show = queue_zoned_show,
  };
ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
583
584
585
586
587
  static struct queue_sysfs_entry queue_nomerges_entry = {
  	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_nomerges_show,
  	.store = queue_nomerges_store,
  };
c7c22e4d5   Jens Axboe   block: add suppor...
588
589
590
591
592
  static struct queue_sysfs_entry queue_rq_affinity_entry = {
  	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_rq_affinity_show,
  	.store = queue_rq_affinity_store,
  };
bc58ba946   Jens Axboe   block: add sysfs ...
593
594
  static struct queue_sysfs_entry queue_iostats_entry = {
  	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
956bcb7c1   Jens Axboe   block: add helper...
595
596
  	.show = queue_show_iostats,
  	.store = queue_store_iostats,
bc58ba946   Jens Axboe   block: add sysfs ...
597
  };
e2e1a148b   Jens Axboe   block: add sysfs ...
598
599
  static struct queue_sysfs_entry queue_random_entry = {
  	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
956bcb7c1   Jens Axboe   block: add helper...
600
601
  	.show = queue_show_random,
  	.store = queue_store_random,
e2e1a148b   Jens Axboe   block: add sysfs ...
602
  };
05229beed   Jens Axboe   block: add block ...
603
604
605
606
607
  static struct queue_sysfs_entry queue_poll_entry = {
  	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_poll_show,
  	.store = queue_poll_store,
  };
06426adf0   Jens Axboe   blk-mq: implement...
608
609
610
611
612
  static struct queue_sysfs_entry queue_poll_delay_entry = {
  	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_poll_delay_show,
  	.store = queue_poll_delay_store,
  };
93e9d8e83   Jens Axboe   block: add abilit...
613
614
615
616
617
  static struct queue_sysfs_entry queue_wc_entry = {
  	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_wc_show,
  	.store = queue_wc_store,
  };
ea6ca600e   Yigal Korman   block: expose QUE...
618
619
620
621
  static struct queue_sysfs_entry queue_dax_entry = {
  	.attr = {.name = "dax", .mode = S_IRUGO },
  	.show = queue_dax_show,
  };
87760e5ee   Jens Axboe   block: hook up wr...
622
623
624
625
626
  static struct queue_sysfs_entry queue_wb_lat_entry = {
  	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_wb_lat_show,
  	.store = queue_wb_lat_store,
  };
297e3d854   Shaohua Li   blk-throttle: mak...
627
628
629
630
631
632
633
  #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
  static struct queue_sysfs_entry throtl_sample_time_entry = {
  	.attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
  	.show = blk_throtl_sample_time_show,
  	.store = blk_throtl_sample_time_store,
  };
  #endif
8324aa91d   Jens Axboe   block: split tag ...
634
635
636
637
638
/*
 * Attributes exposed under /sys/block/<dev>/queue/.  This table is wired
 * into blk_queue_ktype below; the list must stay NULL-terminated.
 */
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	/* request size / segment limits */
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	/* block size / alignment topology */
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	/* discard / write-same / write-zeroes capabilities */
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	/* device properties and behavioral tunables */
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};
  
  #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
  
  static ssize_t
  queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  {
  	struct queue_sysfs_entry *entry = to_queue(attr);
  	struct request_queue *q =
  		container_of(kobj, struct request_queue, kobj);
  	ssize_t res;
  
  	if (!entry->show)
  		return -EIO;
  	mutex_lock(&q->sysfs_lock);
3f3299d5c   Bart Van Assche   block: Rename que...
686
  	if (blk_queue_dying(q)) {
8324aa91d   Jens Axboe   block: split tag ...
687
688
689
690
691
692
693
694
695
696
697
698
699
  		mutex_unlock(&q->sysfs_lock);
  		return -ENOENT;
  	}
  	res = entry->show(q, page);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t
  queue_attr_store(struct kobject *kobj, struct attribute *attr,
  		    const char *page, size_t length)
  {
  	struct queue_sysfs_entry *entry = to_queue(attr);
6728cb0e6   Jens Axboe   block: make core ...
700
  	struct request_queue *q;
8324aa91d   Jens Axboe   block: split tag ...
701
702
703
704
  	ssize_t res;
  
  	if (!entry->store)
  		return -EIO;
6728cb0e6   Jens Axboe   block: make core ...
705
706
  
  	q = container_of(kobj, struct request_queue, kobj);
8324aa91d   Jens Axboe   block: split tag ...
707
  	mutex_lock(&q->sysfs_lock);
3f3299d5c   Bart Van Assche   block: Rename que...
708
  	if (blk_queue_dying(q)) {
8324aa91d   Jens Axboe   block: split tag ...
709
710
711
712
713
714
715
  		mutex_unlock(&q->sysfs_lock);
  		return -ENOENT;
  	}
  	res = entry->store(q, page, length);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
548bc8e1b   Tejun Heo   block: RCU free r...
716
717
718
719
720
721
  static void blk_free_queue_rcu(struct rcu_head *rcu_head)
  {
  	struct request_queue *q = container_of(rcu_head, struct request_queue,
  					       rcu_head);
  	kmem_cache_free(blk_requestq_cachep, q);
  }
8324aa91d   Jens Axboe   block: split tag ...
722
/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	/* Tear down poll statistics callback before freeing its storage. */
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	/* Drop the backing_dev_info reference taken at queue allocation. */
	bdi_put(q->backing_dev_info);

	blkcg_exit_queue(q);

	/* Detach and destroy the elevator, if one was attached. */
	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	/* Legacy vs. blk-mq take different teardown paths here. */
	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	/* Release the queue id; the struct itself is freed after an RCU GP. */
	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
dc9edc44d   Bart Van Assche   block: Fix a blk_...
774
775
776
777
778
779
780
781
  static void blk_release_queue(struct kobject *kobj)
  {
  	struct request_queue *q =
  		container_of(kobj, struct request_queue, kobj);
  
  	INIT_WORK(&q->release_work, __blk_release_queue);
  	schedule_work(&q->release_work);
  }
52cf25d0a   Emese Revfy   Driver core: Cons...
782
/* show/store dispatch used by every attribute in default_attrs above. */
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};
  
/* kobj_type for /sys/block/<dev>/queue; ->release defers to a workqueue. */
struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};
81bb3160a   Bart Van Assche   block: Protect le...
792
793
794
795
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 *
 * Return: 0 on success, negative errno on failure.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/* Double registration is a caller bug; warn but carry on. */
	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	/* kobject_get() pairs with the put in blk_unregister_queue(). */
	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	/* Register the elevator's sysfs entries, if the queue has one. */
	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			/* Unwind the kobject_add()/kobject_get() above. */
			mutex_unlock(&q->sysfs_lock);
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
8324aa91d   Jens Axboe   block: split tag ...
860

81bb3160a   Bart Van Assche   block: Protect le...
861
862
863
864
865
866
867
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);

	/* Mark the queue unregistered under queue_lock. */
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	mutex_unlock(&q->sysfs_lock);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	wbt_exit(q);

	/* Elevator sysfs teardown needs the same lock as elv_iosched_store(). */
	mutex_lock(&q->sysfs_lock);
	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	/* Drop the device reference taken in blk_register_queue(). */
	kobject_put(&disk_to_dev(disk)->kobj);
}