block/blk-sysfs.c

  /*
   * Functions related to sysfs handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/blktrace_api.h>
  
  #include "blk.h"
  
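  /*
   * Each queue_sysfs_entry describes one attribute file exported under
   * /sys/block/<disk>/queue/: its name, its permissions and the show and
   * store handlers that implement it.
   */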
  struct queue_sysfs_entry {
  	struct attribute attr;
  	ssize_t (*show)(struct request_queue *, char *);
  	ssize_t (*store)(struct request_queue *, const char *, size_t);
  };
  
  static ssize_t
  queue_var_show(unsigned int var, char *page)
  {
  	return sprintf(page, "%d\n", var);
  }
  
  static ssize_t
  queue_var_store(unsigned long *var, const char *page, size_t count)
  {
  	char *p = (char *) page;
  
  	*var = simple_strtoul(p, &p, 10);
  	return count;
  }
  
  static ssize_t queue_requests_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->nr_requests, (page));
  }
  
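  /*
   * Writing nr_requests resizes the request list.  The congestion
   * thresholds are recomputed and the congested and "queue full" state of
   * the READ and WRITE request lists is re-evaluated under the queue lock,
   * waking up any tasks still waiting on a list that is no longer full.
   */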
  static ssize_t
  queue_requests_store(struct request_queue *q, const char *page, size_t count)
  {
  	struct request_list *rl = &q->rq;
  	unsigned long nr;
  	int ret = queue_var_store(&nr, page, count);
  	if (nr < BLKDEV_MIN_RQ)
  		nr = BLKDEV_MIN_RQ;
  
  	spin_lock_irq(q->queue_lock);
  	q->nr_requests = nr;
  	blk_queue_congestion_threshold(q);
  
  	if (rl->count[READ] >= queue_congestion_on_threshold(q))
  		blk_set_queue_congested(q, READ);
  	else if (rl->count[READ] < queue_congestion_off_threshold(q))
  		blk_clear_queue_congested(q, READ);
  
  	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
  		blk_set_queue_congested(q, WRITE);
  	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
  		blk_clear_queue_congested(q, WRITE);
  
  	if (rl->count[READ] >= q->nr_requests) {
  		blk_set_queue_full(q, READ);
  	} else if (rl->count[READ]+1 <= q->nr_requests) {
  		blk_clear_queue_full(q, READ);
  		wake_up(&rl->wait[READ]);
  	}
  
  	if (rl->count[WRITE] >= q->nr_requests) {
  		blk_set_queue_full(q, WRITE);
  	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
  		blk_clear_queue_full(q, WRITE);
  		wake_up(&rl->wait[WRITE]);
  	}
  	spin_unlock_irq(q->queue_lock);
  	return ret;
  }
  
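  /*
   * The readahead window is kept internally in pages
   * (backing_dev_info.ra_pages); read_ahead_kb converts to and from
   * kilobytes using PAGE_CACHE_SHIFT.
   */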
  static ssize_t queue_ra_show(struct request_queue *q, char *page)
  {
  	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
  
  	return queue_var_show(ra_kb, (page));
  }
  
  static ssize_t
  queue_ra_store(struct request_queue *q, const char *page, size_t count)
  {
  	unsigned long ra_kb;
  	ssize_t ret = queue_var_store(&ra_kb, page, count);
  
  	spin_lock_irq(q->queue_lock);
  	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
  	spin_unlock_irq(q->queue_lock);
  
  	return ret;
  }
  
  static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
  {
  	int max_sectors_kb = q->max_sectors >> 1;
  
  	return queue_var_show(max_sectors_kb, (page));
  }

  static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(q->hardsect_size, page);
  }
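
  /*
   * max_sectors_kb may be set anywhere between one page and the hardware
   * limit (max_hw_sectors_kb); anything outside that range is rejected
   * with -EINVAL.
   */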
  static ssize_t
  queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
  {
  	unsigned long max_sectors_kb,
  			max_hw_sectors_kb = q->max_hw_sectors >> 1,
  			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
  	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
  
  	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
  		return -EINVAL;
  	/*
  	 * Take the queue lock to update the readahead and max_sectors
  	 * values synchronously:
  	 */
  	spin_lock_irq(q->queue_lock);
  	q->max_sectors = max_sectors_kb << 1;
  	spin_unlock_irq(q->queue_lock);
  
  	return ret;
  }
  
  static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
  {
  	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
  
  	return queue_var_show(max_hw_sectors_kb, (page));
  }
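
  /*
   * "nomerges" exposes QUEUE_FLAG_NOMERGES: writing a non-zero value
   * disables request merging on this queue, writing zero re-enables it.
   */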
  static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
  {
  	return queue_var_show(blk_queue_nomerges(q), page);
  }
  
  static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
  				    size_t count)
  {
  	unsigned long nm;
  	ssize_t ret = queue_var_store(&nm, page, count);
  	spin_lock_irq(q->queue_lock);
  	if (nm)
  		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
  	else
  		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);

  	spin_unlock_irq(q->queue_lock);
  	return ret;
  }
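
  /*
   * The attribute definitions.  Tunable attributes are root-writable
   * (S_IRUGO | S_IWUSR); purely informational ones such as
   * max_hw_sectors_kb and hw_sector_size are read-only (S_IRUGO).
   */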
  
  static struct queue_sysfs_entry queue_requests_entry = {
  	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_requests_show,
  	.store = queue_requests_store,
  };
  
  static struct queue_sysfs_entry queue_ra_entry = {
  	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_ra_show,
  	.store = queue_ra_store,
  };
  
  static struct queue_sysfs_entry queue_max_sectors_entry = {
  	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_max_sectors_show,
  	.store = queue_max_sectors_store,
  };
  
  static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
  	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
  	.show = queue_max_hw_sectors_show,
  };
  
  static struct queue_sysfs_entry queue_iosched_entry = {
  	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
  	.show = elv_iosched_show,
  	.store = elv_iosched_store,
  };

  static struct queue_sysfs_entry queue_hw_sector_size_entry = {
  	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
  	.show = queue_hw_sector_size_show,
  };

  static struct queue_sysfs_entry queue_nomerges_entry = {
  	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
  	.show = queue_nomerges_show,
  	.store = queue_nomerges_store,
  };

  static struct attribute *default_attrs[] = {
  	&queue_requests_entry.attr,
  	&queue_ra_entry.attr,
  	&queue_max_hw_sectors_entry.attr,
  	&queue_max_sectors_entry.attr,
  	&queue_iosched_entry.attr,
  	&queue_hw_sector_size_entry.attr,
  	&queue_nomerges_entry.attr,
  	NULL,
  };
  
  #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
  
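  /*
   * Generic show/store entry points called by sysfs.  They map the
   * embedded kobject back to its request_queue, take q->sysfs_lock and
   * refuse access once the queue has been marked dead.
   */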
  static ssize_t
  queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  {
  	struct queue_sysfs_entry *entry = to_queue(attr);
  	struct request_queue *q =
  		container_of(kobj, struct request_queue, kobj);
  	ssize_t res;
  
  	if (!entry->show)
  		return -EIO;
  	mutex_lock(&q->sysfs_lock);
  	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
  		mutex_unlock(&q->sysfs_lock);
  		return -ENOENT;
  	}
  	res = entry->show(q, page);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  static ssize_t
  queue_attr_store(struct kobject *kobj, struct attribute *attr,
  		    const char *page, size_t length)
  {
  	struct queue_sysfs_entry *entry = to_queue(attr);
  	struct request_queue *q;
  	ssize_t res;
  
  	if (!entry->store)
  		return -EIO;
  
  	q = container_of(kobj, struct request_queue, kobj);
  	mutex_lock(&q->sysfs_lock);
  	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
  		mutex_unlock(&q->sysfs_lock);
  		return -ENOENT;
  	}
  	res = entry->store(q, page, length);
  	mutex_unlock(&q->sysfs_lock);
  	return res;
  }
  
  /**
   * blk_release_queue: - release a &struct request_queue when it is no longer needed
   * @kobj:    the kobj belonging to the request queue to be released
   *
   * Description:
   *     blk_release_queue is the pair to blk_init_queue() or
   *     blk_queue_make_request().  It should be called when a request queue is
   *     being released; typically when a block device is being de-registered.
   *     Currently, its primary task is to free all the &struct request
   *     structures that were allocated to the queue and the queue itself.
   *
   * Caveat:
   *     Hopefully the low level driver will have finished any
   *     outstanding requests first...
   **/
  static void blk_release_queue(struct kobject *kobj)
  {
  	struct request_queue *q =
  		container_of(kobj, struct request_queue, kobj);
  	struct request_list *rl = &q->rq;
  
  	blk_sync_queue(q);
  
  	if (rl->rq_pool)
  		mempool_destroy(rl->rq_pool);
  
  	if (q->queue_tags)
  		__blk_queue_free_tags(q);
  
  	blk_trace_shutdown(q);
  
  	bdi_destroy(&q->backing_dev_info);
  	kmem_cache_free(blk_requestq_cachep, q);
  }
  
  static struct sysfs_ops queue_sysfs_ops = {
  	.show	= queue_attr_show,
  	.store	= queue_attr_store,
  };
  
  struct kobj_type blk_queue_ktype = {
  	.sysfs_ops	= &queue_sysfs_ops,
  	.default_attrs	= default_attrs,
  	.release	= blk_release_queue,
  };
  
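  /*
   * Add the "queue" directory for a disk to sysfs and register the
   * elevator attributes below it.  Queues without a request_fn
   * (make_request based drivers) are skipped.
   */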
  int blk_register_queue(struct gendisk *disk)
  {
  	int ret;
  
  	struct request_queue *q = disk->queue;
  	if (WARN_ON(!q))
  		return -ENXIO;

  	if (!q->request_fn)
  		return 0;

  	ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
  			  "%s", "queue");
  	if (ret < 0)
  		return ret;
  
  	kobject_uevent(&q->kobj, KOBJ_ADD);
  
  	ret = elv_register_queue(q);
  	if (ret) {
  		kobject_uevent(&q->kobj, KOBJ_REMOVE);
  		kobject_del(&q->kobj);
  		return ret;
  	}
  
  	return 0;
  }
  
  void blk_unregister_queue(struct gendisk *disk)
  {
  	struct request_queue *q = disk->queue;

  	if (WARN_ON(!q))
  		return;
  
  	if (q->request_fn) {
  		elv_unregister_queue(q);
  
  		kobject_uevent(&q->kobj, KOBJ_REMOVE);
  		kobject_del(&q->kobj);
  		kobject_put(&disk->dev.kobj);
  	}
  }