Commit e2e1a148bc45855816ae6b4692ce29d0020fa22e
1 parent: 841fdffdd3
Exists in master and in 7 other branches
block: add sysfs knob for turning off disk entropy contributions
There are two reasons for doing this:

- On SSD disks, the completion times aren't as random as they are for
  rotational drives. So it's questionable whether they should contribute
  to the random pool in the first place.

- Calling add_disk_randomness() has a lot of overhead.

This adds /sys/block/<dev>/queue/add_random that will allow you to
switch off on a per-device basis. The default setting is on, so there
should be no functional changes from this patch.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
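For illustration, the knob would be exercised from userspace roughly
like this (the device name sda is just a placeholder):

    # read the current setting; 1 (on) is the default
    cat /sys/block/sda/queue/add_random

    # stop this device's completions from feeding the entropy pool
    echo 0 > /sys/block/sda/queue/add_random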
Showing 3 changed files with 34 additions and 2 deletions
block/blk-core.c
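(The block/blk-core.c hunk is not reproduced in this view. Judging from
the flag's semantics and the two additions and one deletion left over in
the diffstat, it presumably makes the completion path's existing
add_disk_randomness() call conditional on the new flag. A minimal sketch
of that pattern, not the verbatim hunk:)

    /* Sketch: only feed the entropy pool when the queue opts in.
     * The exact surrounding function in blk-core.c is assumed. */
    if (blk_queue_add_random(req->q))
        add_disk_randomness(req->rq_disk);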
block/blk-sysfs.c
@@ -250,6 +250,27 @@
 	return ret;
 }
 
+static ssize_t queue_random_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_add_random(q), page);
+}
+
+static ssize_t queue_random_store(struct request_queue *q, const char *page,
+				  size_t count)
+{
+	unsigned long val;
+	ssize_t ret = queue_var_store(&val, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	if (val)
+		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
 static ssize_t queue_iostats_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(blk_queue_io_stat(q), page);

@@ -374,6 +395,12 @@
 	.store = queue_iostats_store,
 };
 
+static struct queue_sysfs_entry queue_random_entry = {
+	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_random_show,
+	.store = queue_random_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,

@@ -394,6 +421,7 @@
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
+	&queue_random_entry.attr,
 	NULL,
 };
include/linux/blkdev.h
@@ -467,11 +467,13 @@
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE) |		\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_SAME_COMP) |		\
+				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 static inline int queue_is_locked(struct request_queue *q)
 {

@@ -596,6 +598,7 @@
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
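As a usage note, QUEUE_FLAG_ADD_RANDOM is an ordinary queue flag, so a
driver that knows its completion timings carry little entropy could opt
out while setting up its queue. A hypothetical sketch, not part of this
patch, using the existing unlocked queue-flag helper:

    /* Hypothetical driver-side opt-out, done before the queue goes
     * live, e.g. for a device with predictable completion times. */
    queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);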