Blame view

block/blk-sysfs.c 24.4 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  // SPDX-License-Identifier: GPL-2.0
8324aa91d   Jens Axboe   block: split tag ...
2
3
4
5
  /*
   * Functions related to sysfs handling
   */
  #include <linux/kernel.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
6
  #include <linux/slab.h>
8324aa91d   Jens Axboe   block: split tag ...
7
8
9
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
66114cad6   Tejun Heo   writeback: separa...
10
  #include <linux/backing-dev.h>
8324aa91d   Jens Axboe   block: split tag ...
11
  #include <linux/blktrace_api.h>
320ae51fe   Jens Axboe   blk-mq: new multi...
12
  #include <linux/blk-mq.h>
eea8f41cc   Tejun Heo   blkcg: move block...
13
  #include <linux/blk-cgroup.h>
85e0cbbb8   Luis Chamberlain   block: create the...
14
  #include <linux/debugfs.h>
8324aa91d   Jens Axboe   block: split tag ...
15
16
  
  #include "blk.h"
3edcc0ce8   Ming Lei   block: blk-mq: do...
17
  #include "blk-mq.h"
d173a2516   Omar Sandoval   blk-mq: move debu...
18
  #include "blk-mq-debugfs.h"
87760e5ee   Jens Axboe   block: hook up wr...
19
  #include "blk-wbt.h"
8324aa91d   Jens Axboe   block: split tag ...
20
21
22
23
24
25
26
27
  
/* Binds a sysfs attribute to per-request_queue show/store handlers. */
struct queue_sysfs_entry {
	struct attribute attr;
	/* format current value into @page; returns bytes written or -errno */
	ssize_t (*show)(struct request_queue *, char *);
	/* parse new value from @page; returns bytes consumed or -errno */
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
  
  static ssize_t
9cb308ce8   Xiaotian Feng   block: sysfs fix ...
28
  queue_var_show(unsigned long var, char *page)
8324aa91d   Jens Axboe   block: split tag ...
29
  {
9cb308ce8   Xiaotian Feng   block: sysfs fix ...
30
31
  	return sprintf(page, "%lu
  ", var);
8324aa91d   Jens Axboe   block: split tag ...
32
33
34
35
36
  }
  
  static ssize_t
  queue_var_store(unsigned long *var, const char *page, size_t count)
  {
b1f3b64d7   Dave Reisner   block: reject inv...
37
38
  	int err;
  	unsigned long v;
ed751e683   Jingoo Han   block/blk-sysfs.c...
39
  	err = kstrtoul(page, 10, &v);
b1f3b64d7   Dave Reisner   block: reject inv...
40
41
42
43
  	if (err || v > UINT_MAX)
  		return -EINVAL;
  
  	*var = v;
8324aa91d   Jens Axboe   block: split tag ...
44

8324aa91d   Jens Axboe   block: split tag ...
45
46
  	return count;
  }
80e091d10   Jens Axboe   blk-wbt: allow re...
47
  static ssize_t queue_var_store64(s64 *var, const char *page)
87760e5ee   Jens Axboe   block: hook up wr...
48
49
  {
  	int err;
80e091d10   Jens Axboe   blk-wbt: allow re...
50
  	s64 v;
87760e5ee   Jens Axboe   block: hook up wr...
51

80e091d10   Jens Axboe   blk-wbt: allow re...
52
  	err = kstrtos64(page, 10, &v);
87760e5ee   Jens Axboe   block: hook up wr...
53
54
55
56
57
58
  	if (err < 0)
  		return err;
  
  	*var = v;
  	return 0;
  }
8324aa91d   Jens Axboe   block: split tag ...
59
60
61
62
63
64
65
66
  static ssize_t queue_requests_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->nr_requests, (page));
  }
  
  static ssize_t
  queue_requests_store(struct request_queue *q, const char *page, size_t count)
  {
8324aa91d   Jens Axboe   block: split tag ...
67
  	unsigned long nr;
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
68
  	int ret, err;
b8a9ae779   Jens Axboe   block: don't assu...
69

344e9ffcb   Jens Axboe   block: add queue_...
70
  	if (!queue_is_mq(q))
b8a9ae779   Jens Axboe   block: don't assu...
71
72
73
  		return -EINVAL;
  
  	ret = queue_var_store(&nr, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
74
75
  	if (ret < 0)
  		return ret;
8324aa91d   Jens Axboe   block: split tag ...
76
77
  	if (nr < BLKDEV_MIN_RQ)
  		nr = BLKDEV_MIN_RQ;
a1ce35fa4   Jens Axboe   block: remove dea...
78
  	err = blk_mq_update_nr_requests(q, nr);
e3a2b3f93   Jens Axboe   blk-mq: allow cha...
79
80
  	if (err)
  		return err;
8324aa91d   Jens Axboe   block: split tag ...
81
82
83
84
85
  	return ret;
  }
  
  static ssize_t queue_ra_show(struct request_queue *q, char *page)
  {
dc3b17cc8   Jan Kara   block: Use pointe...
86
  	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
87
  					(PAGE_SHIFT - 10);
8324aa91d   Jens Axboe   block: split tag ...
88
89
90
91
92
93
94
95
96
  
  	return queue_var_show(ra_kb, (page));
  }
  
  static ssize_t
  queue_ra_store(struct request_queue *q, const char *page, size_t count)
  {
  	unsigned long ra_kb;
  	ssize_t ret = queue_var_store(&ra_kb, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
97
98
  	if (ret < 0)
  		return ret;
dc3b17cc8   Jan Kara   block: Use pointe...
99
  	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
8324aa91d   Jens Axboe   block: split tag ...
100
101
102
103
104
105
  
  	return ret;
  }
  
  static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
  {
ae03bf639   Martin K. Petersen   block: Use access...
106
  	int max_sectors_kb = queue_max_sectors(q) >> 1;
8324aa91d   Jens Axboe   block: split tag ...
107
108
109
  
  	return queue_var_show(max_sectors_kb, (page));
  }
c77a5710b   Martin K. Petersen   block: Export max...
110
111
112
113
  static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_max_segments(q), (page));
  }
1e739730c   Christoph Hellwig   block: optionally...
114
115
116
117
118
  static ssize_t queue_max_discard_segments_show(struct request_queue *q,
  		char *page)
  {
  	return queue_var_show(queue_max_discard_segments(q), (page));
  }
13f05c8d8   Martin K. Petersen   block/scsi: Provi...
119
120
121
122
  static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->limits.max_integrity_segments, (page));
  }
c77a5710b   Martin K. Petersen   block: Export max...
123
124
  static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
  {
38417468d   Christoph Hellwig   scsi: block: remo...
125
  	return queue_var_show(queue_max_segment_size(q), (page));
c77a5710b   Martin K. Petersen   block: Export max...
126
  }
e1defc4ff   Martin K. Petersen   block: Do away wi...
127
  static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
e68b903c6   Martin K. Petersen   Expose hardware s...
128
  {
e1defc4ff   Martin K. Petersen   block: Do away wi...
129
  	return queue_var_show(queue_logical_block_size(q), page);
e68b903c6   Martin K. Petersen   Expose hardware s...
130
  }
c72758f33   Martin K. Petersen   block: Export I/O...
131
132
133
134
  static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_physical_block_size(q), page);
  }
87caf97cf   Hannes Reinecke   blk-sysfs: Add 'c...
135
136
137
138
  static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->limits.chunk_sectors, page);
  }
c72758f33   Martin K. Petersen   block: Export I/O...
139
140
141
142
143
144
145
146
  static ssize_t queue_io_min_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_io_min(q), page);
  }
  
  static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_io_opt(q), page);
e68b903c6   Martin K. Petersen   Expose hardware s...
147
  }
86b372814   Martin K. Petersen   block: Expose dis...
148
149
150
151
  static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->limits.discard_granularity, page);
  }
0034af036   Jens Axboe   block: make /sys/...
152
153
  static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
  {
0034af036   Jens Axboe   block: make /sys/...
154

18f922d03   Alan   blk: fix overflow...
155
156
157
  	return sprintf(page, "%llu
  ",
  		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
0034af036   Jens Axboe   block: make /sys/...
158
  }
86b372814   Martin K. Petersen   block: Expose dis...
159
160
  static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
  {
a934a00a6   Martin K. Petersen   block: Fix discar...
161
162
163
  	return sprintf(page, "%llu
  ",
  		       (unsigned long long)q->limits.max_discard_sectors << 9);
86b372814   Martin K. Petersen   block: Expose dis...
164
  }
0034af036   Jens Axboe   block: make /sys/...
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/*
 * Set a soft cap on discard request size.  The written value is in bytes;
 * it must be a multiple of the discard granularity and is clamped to the
 * hardware limit (max_hw_discard_sectors).
 */
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	/* reject values not aligned to the discard granularity */
	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	/* convert bytes to 512-byte sectors; limits store sectors */
	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	/* never allow more than the hardware supports */
	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
98262f276   Martin K. Petersen   block: Allow devi...
187
188
  static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
  {
48920ff2a   Christoph Hellwig   block: remove the...
189
  	return queue_var_show(0, page);
98262f276   Martin K. Petersen   block: Allow devi...
190
  }
4363ac7c1   Martin K. Petersen   block: Implement ...
191
192
193
194
195
196
  static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
  {
  	return sprintf(page, "%llu
  ",
  		(unsigned long long)q->limits.max_write_same_sectors << 9);
  }
a6f0788ec   Chaitanya Kulkarni   block: add suppor...
197
198
199
200
201
202
  static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
  {
  	return sprintf(page, "%llu
  ",
  		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
  }
4363ac7c1   Martin K. Petersen   block: Implement ...
203

0512a75b9   Keith Busch   block: Introduce ...
204
205
206
207
208
209
210
  static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
  {
  	unsigned long long max_sectors = q->limits.max_zone_append_sectors;
  
  	return sprintf(page, "%llu
  ", max_sectors << SECTOR_SHIFT);
  }
8324aa91d   Jens Axboe   block: split tag ...
211
212
213
214
  static ssize_t
  queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
  {
  	unsigned long max_sectors_kb,
ae03bf639   Martin K. Petersen   block: Use access...
215
  		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
216
  			page_kb = 1 << (PAGE_SHIFT - 10);
8324aa91d   Jens Axboe   block: split tag ...
217
  	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
218
219
  	if (ret < 0)
  		return ret;
ca369d51b   Martin K. Petersen   block/sd: Fix dev...
220
221
  	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
  					 q->limits.max_dev_sectors >> 1);
8324aa91d   Jens Axboe   block: split tag ...
222
223
  	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
  		return -EINVAL;
7c239517d   Wu Fengguang   block: don't take...
224

0d945c1f9   Christoph Hellwig   block: remove the...
225
  	spin_lock_irq(&q->queue_lock);
c295fc057   Nikanth Karthikesan   block: Allow chan...
226
  	q->limits.max_sectors = max_sectors_kb << 1;
dc3b17cc8   Jan Kara   block: Use pointe...
227
  	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
0d945c1f9   Christoph Hellwig   block: remove the...
228
  	spin_unlock_irq(&q->queue_lock);
8324aa91d   Jens Axboe   block: split tag ...
229
230
231
232
233
234
  
  	return ret;
  }
  
  static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
  {
ae03bf639   Martin K. Petersen   block: Use access...
235
  	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
8324aa91d   Jens Axboe   block: split tag ...
236
237
238
  
  	return queue_var_show(max_hw_sectors_kb, (page));
  }
956bcb7c1   Jens Axboe   block: add helper...
239
240
  #define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
  static ssize_t								\
fc93fe145   Christoph Hellwig   block: make QUEUE...
241
  queue_##name##_show(struct request_queue *q, char *page)		\
956bcb7c1   Jens Axboe   block: add helper...
242
243
244
245
246
247
  {									\
  	int bit;							\
  	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
  	return queue_var_show(neg ? !bit : bit, page);			\
  }									\
  static ssize_t								\
fc93fe145   Christoph Hellwig   block: make QUEUE...
248
  queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
956bcb7c1   Jens Axboe   block: add helper...
249
250
251
252
  {									\
  	unsigned long val;						\
  	ssize_t ret;							\
  	ret = queue_var_store(&val, page, count);			\
c678ef528   Arnd Bergmann   block: avoid usin...
253
254
  	if (ret < 0)							\
  		 return ret;						\
956bcb7c1   Jens Axboe   block: add helper...
255
256
257
  	if (neg)							\
  		val = !val;						\
  									\
956bcb7c1   Jens Axboe   block: add helper...
258
  	if (val)							\
8814ce8a0   Bart Van Assche   block: Introduce ...
259
  		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
956bcb7c1   Jens Axboe   block: add helper...
260
  	else								\
8814ce8a0   Bart Van Assche   block: Introduce ...
261
  		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
956bcb7c1   Jens Axboe   block: add helper...
262
  	return ret;							\
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
263
  }
956bcb7c1   Jens Axboe   block: add helper...
264
265
266
  QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
  QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
  QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
1cb039f3d   Christoph Hellwig   bdi: replace BDI_...
267
  QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
956bcb7c1   Jens Axboe   block: add helper...
268
  #undef QUEUE_SYSFS_BIT_FNS
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
269

797476b88   Damien Le Moal   block: Add 'zoned...
270
271
272
273
274
275
276
277
278
279
280
281
282
283
  static ssize_t queue_zoned_show(struct request_queue *q, char *page)
  {
  	switch (blk_queue_zoned_model(q)) {
  	case BLK_ZONED_HA:
  		return sprintf(page, "host-aware
  ");
  	case BLK_ZONED_HM:
  		return sprintf(page, "host-managed
  ");
  	default:
  		return sprintf(page, "none
  ");
  	}
  }
965b652e9   Damien Le Moal   block: Expose que...
284
285
286
287
  static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(blk_queue_nr_zones(q), page);
  }
e15864f8e   Niklas Cassel   block: add max_op...
288
289
290
291
  static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_max_open_zones(q), page);
  }
659bf827b   Niklas Cassel   block: add max_ac...
292
293
294
295
  static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(queue_max_active_zones(q), page);
  }
ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
296
297
  static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
  {
488991e28   Alan D. Brunelle   block: Added in s...
298
299
  	return queue_var_show((blk_queue_nomerges(q) << 1) |
  			       blk_queue_noxmerges(q), page);
ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
300
301
302
303
304
305
306
  }
  
  static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
  				    size_t count)
  {
  	unsigned long nm;
  	ssize_t ret = queue_var_store(&nm, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
307
308
  	if (ret < 0)
  		return ret;
57d74df90   Christoph Hellwig   block: use atomic...
309
310
  	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
  	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
488991e28   Alan D. Brunelle   block: Added in s...
311
  	if (nm == 2)
57d74df90   Christoph Hellwig   block: use atomic...
312
  		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
488991e28   Alan D. Brunelle   block: Added in s...
313
  	else if (nm)
57d74df90   Christoph Hellwig   block: use atomic...
314
  		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
315

ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
316
317
  	return ret;
  }
c7c22e4d5   Jens Axboe   block: add suppor...
318
319
  static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
  {
9cb308ce8   Xiaotian Feng   block: sysfs fix ...
320
  	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
5757a6d76   Dan Williams   block: strict rq_...
321
  	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
c7c22e4d5   Jens Axboe   block: add suppor...
322

5757a6d76   Dan Williams   block: strict rq_...
323
  	return queue_var_show(set << force, page);
c7c22e4d5   Jens Axboe   block: add suppor...
324
325
326
327
328
329
  }
  
  static ssize_t
  queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
  {
  	ssize_t ret = -EINVAL;
0a06ff068   Christoph Hellwig   kernel: remove CO...
330
  #ifdef CONFIG_SMP
c7c22e4d5   Jens Axboe   block: add suppor...
331
332
333
  	unsigned long val;
  
  	ret = queue_var_store(&val, page, count);
b1f3b64d7   Dave Reisner   block: reject inv...
334
335
  	if (ret < 0)
  		return ret;
e8037d498   Eric Seppanen   block: Fix queue_...
336
  	if (val == 2) {
57d74df90   Christoph Hellwig   block: use atomic...
337
338
  		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
  		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
e8037d498   Eric Seppanen   block: Fix queue_...
339
  	} else if (val == 1) {
57d74df90   Christoph Hellwig   block: use atomic...
340
341
  		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
  		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
e8037d498   Eric Seppanen   block: Fix queue_...
342
  	} else if (val == 0) {
57d74df90   Christoph Hellwig   block: use atomic...
343
344
  		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
  		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
5757a6d76   Dan Williams   block: strict rq_...
345
  	}
c7c22e4d5   Jens Axboe   block: add suppor...
346
347
348
  #endif
  	return ret;
  }
8324aa91d   Jens Axboe   block: split tag ...
349

06426adf0   Jens Axboe   blk-mq: implement...
350
351
  static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
  {
64f1c21e8   Jens Axboe   blk-mq: make the ...
352
  	int val;
29ece8b43   Yufen Yu   block: add BLK_MQ...
353
354
  	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
  		val = BLK_MQ_POLL_CLASSIC;
64f1c21e8   Jens Axboe   blk-mq: make the ...
355
356
357
358
359
  	else
  		val = q->poll_nsec / 1000;
  
  	return sprintf(page, "%d
  ", val);
06426adf0   Jens Axboe   blk-mq: implement...
360
361
362
363
364
  }
  
  static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
  				size_t count)
  {
64f1c21e8   Jens Axboe   blk-mq: make the ...
365
  	int err, val;
06426adf0   Jens Axboe   blk-mq: implement...
366
367
368
  
  	if (!q->mq_ops || !q->mq_ops->poll)
  		return -EINVAL;
64f1c21e8   Jens Axboe   blk-mq: make the ...
369
370
371
  	err = kstrtoint(page, 10, &val);
  	if (err < 0)
  		return err;
06426adf0   Jens Axboe   blk-mq: implement...
372

29ece8b43   Yufen Yu   block: add BLK_MQ...
373
374
375
  	if (val == BLK_MQ_POLL_CLASSIC)
  		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
  	else if (val >= 0)
64f1c21e8   Jens Axboe   blk-mq: make the ...
376
  		q->poll_nsec = val * 1000;
29ece8b43   Yufen Yu   block: add BLK_MQ...
377
378
  	else
  		return -EINVAL;
64f1c21e8   Jens Axboe   blk-mq: make the ...
379
380
  
  	return count;
06426adf0   Jens Axboe   blk-mq: implement...
381
  }
05229beed   Jens Axboe   block: add block ...
382
383
384
385
386
387
388
389
390
391
  static ssize_t queue_poll_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
  }
  
  static ssize_t queue_poll_store(struct request_queue *q, const char *page,
  				size_t count)
  {
  	unsigned long poll_on;
  	ssize_t ret;
cd19181bf   Ming Lei   blk-mq: enable IO...
392
393
  	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
  	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
05229beed   Jens Axboe   block: add block ...
394
395
396
397
398
  		return -EINVAL;
  
  	ret = queue_var_store(&poll_on, page, count);
  	if (ret < 0)
  		return ret;
05229beed   Jens Axboe   block: add block ...
399
  	if (poll_on)
8814ce8a0   Bart Van Assche   block: Introduce ...
400
  		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
05229beed   Jens Axboe   block: add block ...
401
  	else
8814ce8a0   Bart Van Assche   block: Introduce ...
402
  		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
05229beed   Jens Axboe   block: add block ...
403
404
405
  
  	return ret;
  }
65cd1d13b   Weiping Zhang   block: add io tim...
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
  static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
  {
  	return sprintf(page, "%u
  ", jiffies_to_msecs(q->rq_timeout));
  }
  
  static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
  				  size_t count)
  {
  	unsigned int val;
  	int err;
  
  	err = kstrtou32(page, 10, &val);
  	if (err || val == 0)
  		return -EINVAL;
  
  	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
  
  	return count;
  }
87760e5ee   Jens Axboe   block: hook up wr...
426
427
  static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
  {
a79050434   Josef Bacik   blk-rq-qos: refac...
428
  	if (!wbt_rq_qos(q))
87760e5ee   Jens Axboe   block: hook up wr...
429
  		return -EINVAL;
a79050434   Josef Bacik   blk-rq-qos: refac...
430
431
  	return sprintf(page, "%llu
  ", div_u64(wbt_get_min_lat(q), 1000));
87760e5ee   Jens Axboe   block: hook up wr...
432
433
434
435
436
  }
  
  static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
  				  size_t count)
  {
a79050434   Josef Bacik   blk-rq-qos: refac...
437
  	struct rq_qos *rqos;
87760e5ee   Jens Axboe   block: hook up wr...
438
  	ssize_t ret;
80e091d10   Jens Axboe   blk-wbt: allow re...
439
  	s64 val;
87760e5ee   Jens Axboe   block: hook up wr...
440

87760e5ee   Jens Axboe   block: hook up wr...
441
442
443
  	ret = queue_var_store64(&val, page);
  	if (ret < 0)
  		return ret;
d62118b6d   Jens Axboe   blk-wbt: allow wb...
444
445
  	if (val < -1)
  		return -EINVAL;
a79050434   Josef Bacik   blk-rq-qos: refac...
446
447
  	rqos = wbt_rq_qos(q);
  	if (!rqos) {
d62118b6d   Jens Axboe   blk-wbt: allow wb...
448
449
450
  		ret = wbt_init(q);
  		if (ret)
  			return ret;
d62118b6d   Jens Axboe   blk-wbt: allow wb...
451
  	}
87760e5ee   Jens Axboe   block: hook up wr...
452

80e091d10   Jens Axboe   blk-wbt: allow re...
453
  	if (val == -1)
a79050434   Josef Bacik   blk-rq-qos: refac...
454
  		val = wbt_default_latency_nsec(q);
80e091d10   Jens Axboe   blk-wbt: allow re...
455
  	else if (val >= 0)
a79050434   Josef Bacik   blk-rq-qos: refac...
456
  		val *= 1000ULL;
d62118b6d   Jens Axboe   blk-wbt: allow wb...
457

b7143fe67   Aleksei Zakharov   block: avoid sett...
458
459
  	if (wbt_get_min_lat(q) == val)
  		return count;
c125311d9   Jens Axboe   blk-wbt: don't ma...
460
461
462
463
464
  	/*
  	 * Ensure that the queue is idled, in case the latency update
  	 * ends up either enabling or disabling wbt completely. We can't
  	 * have IO inflight if that happens.
  	 */
a1ce35fa4   Jens Axboe   block: remove dea...
465
466
  	blk_mq_freeze_queue(q);
  	blk_mq_quiesce_queue(q);
80e091d10   Jens Axboe   blk-wbt: allow re...
467

c125311d9   Jens Axboe   blk-wbt: don't ma...
468
  	wbt_set_min_lat(q, val);
c125311d9   Jens Axboe   blk-wbt: don't ma...
469

a1ce35fa4   Jens Axboe   block: remove dea...
470
471
  	blk_mq_unquiesce_queue(q);
  	blk_mq_unfreeze_queue(q);
c125311d9   Jens Axboe   blk-wbt: don't ma...
472

87760e5ee   Jens Axboe   block: hook up wr...
473
474
  	return count;
  }
93e9d8e83   Jens Axboe   block: add abilit...
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
  static ssize_t queue_wc_show(struct request_queue *q, char *page)
  {
  	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
  		return sprintf(page, "write back
  ");
  
  	return sprintf(page, "write through
  ");
  }
  
  static ssize_t queue_wc_store(struct request_queue *q, const char *page,
  			      size_t count)
  {
  	int set = -1;
  
  	if (!strncmp(page, "write back", 10))
  		set = 1;
  	else if (!strncmp(page, "write through", 13) ||
  		 !strncmp(page, "none", 4))
  		set = 0;
  
  	if (set == -1)
  		return -EINVAL;
93e9d8e83   Jens Axboe   block: add abilit...
498
  	if (set)
8814ce8a0   Bart Van Assche   block: Introduce ...
499
  		blk_queue_flag_set(QUEUE_FLAG_WC, q);
93e9d8e83   Jens Axboe   block: add abilit...
500
  	else
8814ce8a0   Bart Van Assche   block: Introduce ...
501
  		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
93e9d8e83   Jens Axboe   block: add abilit...
502
503
504
  
  	return count;
  }
6fcefbe57   Kent Overstreet   block: Add sysfs ...
505
506
507
508
509
  static ssize_t queue_fua_show(struct request_queue *q, char *page)
  {
  	return sprintf(page, "%u
  ", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
  }
ea6ca600e   Yigal Korman   block: expose QUE...
510
511
512
513
  static ssize_t queue_dax_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(blk_queue_dax(q), page);
  }
356261470   Christoph Hellwig   block: add helper...
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
/* Declare a read-only (0444) sysfs attribute backed by _prefix##_show. */
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

/* Declare a read-write (0644) sysfs attribute with show and store. */
#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
8324aa91d   Jens Axboe   block: split tag ...
566

356261470   Christoph Hellwig   block: add helper...
567
568
569
  #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
  QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
  #endif
8324aa91d   Jens Axboe   block: split tag ...
570

356261470   Christoph Hellwig   block: add helper...
571
  /* legacy alias for logical_block_size: */
e68b903c6   Martin K. Petersen   Expose hardware s...
572
  static struct queue_sysfs_entry queue_hw_sector_size_entry = {
5657a819a   Joe Perches   block drivers/blo...
573
  	.attr = {.name = "hw_sector_size", .mode = 0444 },
e1defc4ff   Martin K. Petersen   block: Do away wi...
574
575
  	.show = queue_logical_block_size_show,
  };
fc93fe145   Christoph Hellwig   block: make QUEUE...
576
577
578
  QUEUE_RW_ENTRY(queue_nonrot, "rotational");
  QUEUE_RW_ENTRY(queue_iostats, "iostats");
  QUEUE_RW_ENTRY(queue_random, "add_random");
1cb039f3d   Christoph Hellwig   bdi: replace BDI_...
579
  QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
e2e1a148b   Jens Axboe   block: add sysfs ...
580

4d25339e3   Weiping Zhang   block: don't show...
581
  static struct attribute *queue_attrs[] = {
8324aa91d   Jens Axboe   block: split tag ...
582
583
584
585
  	&queue_requests_entry.attr,
  	&queue_ra_entry.attr,
  	&queue_max_hw_sectors_entry.attr,
  	&queue_max_sectors_entry.attr,
c77a5710b   Martin K. Petersen   block: Export max...
586
  	&queue_max_segments_entry.attr,
1e739730c   Christoph Hellwig   block: optionally...
587
  	&queue_max_discard_segments_entry.attr,
13f05c8d8   Martin K. Petersen   block/scsi: Provi...
588
  	&queue_max_integrity_segments_entry.attr,
c77a5710b   Martin K. Petersen   block: Export max...
589
  	&queue_max_segment_size_entry.attr,
356261470   Christoph Hellwig   block: add helper...
590
  	&elv_iosched_entry.attr,
e68b903c6   Martin K. Petersen   Expose hardware s...
591
  	&queue_hw_sector_size_entry.attr,
e1defc4ff   Martin K. Petersen   block: Do away wi...
592
  	&queue_logical_block_size_entry.attr,
c72758f33   Martin K. Petersen   block: Export I/O...
593
  	&queue_physical_block_size_entry.attr,
87caf97cf   Hannes Reinecke   blk-sysfs: Add 'c...
594
  	&queue_chunk_sectors_entry.attr,
c72758f33   Martin K. Petersen   block: Export I/O...
595
596
  	&queue_io_min_entry.attr,
  	&queue_io_opt_entry.attr,
86b372814   Martin K. Petersen   block: Expose dis...
597
598
  	&queue_discard_granularity_entry.attr,
  	&queue_discard_max_entry.attr,
0034af036   Jens Axboe   block: make /sys/...
599
  	&queue_discard_max_hw_entry.attr,
98262f276   Martin K. Petersen   block: Allow devi...
600
  	&queue_discard_zeroes_data_entry.attr,
4363ac7c1   Martin K. Petersen   block: Implement ...
601
  	&queue_write_same_max_entry.attr,
a6f0788ec   Chaitanya Kulkarni   block: add suppor...
602
  	&queue_write_zeroes_max_entry.attr,
0512a75b9   Keith Busch   block: Introduce ...
603
  	&queue_zone_append_max_entry.attr,
1308835ff   Bartlomiej Zolnierkiewicz   block: export SSD...
604
  	&queue_nonrot_entry.attr,
797476b88   Damien Le Moal   block: Add 'zoned...
605
  	&queue_zoned_entry.attr,
965b652e9   Damien Le Moal   block: Expose que...
606
  	&queue_nr_zones_entry.attr,
e15864f8e   Niklas Cassel   block: add max_op...
607
  	&queue_max_open_zones_entry.attr,
659bf827b   Niklas Cassel   block: add max_ac...
608
  	&queue_max_active_zones_entry.attr,
ac9fafa12   Alan D. Brunelle   block: Skip I/O m...
609
  	&queue_nomerges_entry.attr,
c7c22e4d5   Jens Axboe   block: add suppor...
610
  	&queue_rq_affinity_entry.attr,
bc58ba946   Jens Axboe   block: add sysfs ...
611
  	&queue_iostats_entry.attr,
1cb039f3d   Christoph Hellwig   bdi: replace BDI_...
612
  	&queue_stable_writes_entry.attr,
e2e1a148b   Jens Axboe   block: add sysfs ...
613
  	&queue_random_entry.attr,
05229beed   Jens Axboe   block: add block ...
614
  	&queue_poll_entry.attr,
93e9d8e83   Jens Axboe   block: add abilit...
615
  	&queue_wc_entry.attr,
6fcefbe57   Kent Overstreet   block: Add sysfs ...
616
  	&queue_fua_entry.attr,
ea6ca600e   Yigal Korman   block: expose QUE...
617
  	&queue_dax_entry.attr,
87760e5ee   Jens Axboe   block: hook up wr...
618
  	&queue_wb_lat_entry.attr,
06426adf0   Jens Axboe   blk-mq: implement...
619
  	&queue_poll_delay_entry.attr,
65cd1d13b   Weiping Zhang   block: add io tim...
620
  	&queue_io_timeout_entry.attr,
297e3d854   Shaohua Li   blk-throttle: mak...
621
  #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
356261470   Christoph Hellwig   block: add helper...
622
  	&blk_throtl_sample_time_entry.attr,
297e3d854   Shaohua Li   blk-throttle: mak...
623
  #endif
8324aa91d   Jens Axboe   block: split tag ...
624
625
  	NULL,
  };
4d25339e3   Weiping Zhang   block: don't show...
626
627
628
629
630
631
632
633
634
  static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
  				int n)
  {
  	struct request_queue *q =
  		container_of(kobj, struct request_queue, kobj);
  
  	if (attr == &queue_io_timeout_entry.attr &&
  		(!q->mq_ops || !q->mq_ops->timeout))
  			return 0;
659bf827b   Niklas Cassel   block: add max_ac...
635
636
  	if ((attr == &queue_max_open_zones_entry.attr ||
  	     attr == &queue_max_active_zones_entry.attr) &&
e15864f8e   Niklas Cassel   block: add max_op...
637
638
  	    !blk_queue_is_zoned(q))
  		return 0;
4d25339e3   Weiping Zhang   block: don't show...
639
640
641
642
643
644
645
  	return attr->mode;
  }
  
/*
 * Attribute group backing /sys/block/<dev>/queue/; per-attribute visibility
 * is filtered at registration time by queue_attr_visible().
 */
static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};
8324aa91d   Jens Axboe   block: split tag ...
646
647
648
649
650
651
652
653
654
655
656
657
658
  #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
  
  static ssize_t
  queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  {
  	struct queue_sysfs_entry *entry = to_queue(attr);
  	struct request_queue *q =
  		container_of(kobj, struct request_queue, kobj);
  	ssize_t res;
  
  	if (!entry->show)
  		return -EIO;
  	mutex_lock(&q->sysfs_lock);
8324aa91d   Jens Axboe   block: split tag ...
659
660
661
662
663
664
665
666
667
668
  	res = entry->show(q, page);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t
  queue_attr_store(struct kobject *kobj, struct attribute *attr,
  		    const char *page, size_t length)
  {
  	struct queue_sysfs_entry *entry = to_queue(attr);
6728cb0e6   Jens Axboe   block: make core ...
669
  	struct request_queue *q;
8324aa91d   Jens Axboe   block: split tag ...
670
671
672
673
  	ssize_t res;
  
  	if (!entry->store)
  		return -EIO;
6728cb0e6   Jens Axboe   block: make core ...
674
675
  
  	q = container_of(kobj, struct request_queue, kobj);
8324aa91d   Jens Axboe   block: split tag ...
676
  	mutex_lock(&q->sysfs_lock);
8324aa91d   Jens Axboe   block: split tag ...
677
678
679
680
  	res = entry->store(q, page, length);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
/*
 * RCU callback that actually frees the request_queue, scheduled from
 * blk_release_queue() via call_rcu() so readers under rcu_read_lock()
 * never see the memory disappear under them.
 */
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}
/*
 * Unconfigure the I/O scheduler and dissociate from the cgroup controller.
 *
 * NOTE: the three teardown steps below are strictly ordered — scheduler
 * exit may read cgroup state, and cgroup exit may dereference the
 * backing_dev_info — so do not reorder them.
 */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}
/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous, it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	/* Tear down poll statistics before freeing the stats machinery. */
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	/*
	 * Cancel any deferred work before releasing blk-mq resources it
	 * might touch.
	 */
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	/* Scheduler/cgroup/bdi teardown — see blk_exit_queue() for ordering. */
	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	/* debugfs_mutex guards q->debugfs_dir against concurrent users. */
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	/* Release the queue id, then free the struct after an RCU grace period. */
	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
/* sysfs read/write dispatch for all entries under /sys/block/<dev>/queue/. */
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

/*
 * kobject type for the queue directory; ->release runs when the last
 * reference to the request_queue's kobject is dropped.
 */
struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 *
 * Return: 0 on success, negative errno on failure.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/* Double registration indicates a driver bug; warn but carry on. */
	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measureable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	blk_queue_update_readahead(q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* sysfs_dir_lock serializes creation/removal of the "queue" directory. */
	mutex_lock(&q->sysfs_dir_lock);

	/* Takes a reference on the device kobject; dropped on error paths. */
	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			/* Unwind everything registered above, then bail out. */
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	/* Announce removal to userspace, then tear down the kobject itself. */
	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	/* Elevator teardown needs sysfs_lock, matching elv_register_queue(). */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/* Drop the device-kobject reference taken by blk_register_queue(). */
	kobject_put(&disk_to_dev(disk)->kobj);
}