Commit ac9fafa1243640349aa481adf473db283a695766

Authored by Alan D. Brunelle
Committed by Jens Axboe
Parent: d7e3c3249e

block: Skip I/O merges when disabled

The block I/O, elevator, and I/O scheduler code spends a lot of time trying
to merge I/Os -- rightfully so under "normal" circumstances. However, when
the incoming I/O stream is known to be very random in nature, those cycles
are wasted.

This patch adds a per-request_queue tunable that, when set, disables merge
attempts (beyond the simple one-hit cache check), freeing up a non-trivial
number of CPU cycles.

Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

3 files changed, 31 insertions(+), 0 deletions(-)
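Once the patch is applied, the tunable shows up in sysfs as
/sys/block/<dev>/queue/nomerges; writing 1 sets QUEUE_FLAG_NOMERGES and
writing 0 clears it. Below is a minimal user-space sketch of flipping the
knob -- the device name sda is only an example, and the file exists only on
kernels carrying this patch:

/* Sketch: disable I/O merge attempts for one disk via the new sysfs knob.
 * Assumes the target device is sda; substitute the real device name.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/nomerges", "w");

	if (!f) {
		perror("fopen");	/* no such device, or kernel lacks the patch */
		return 1;
	}
	fputs("1\n", f);		/* "1" sets QUEUE_FLAG_NOMERGES; "0" clears it */
	fclose(f);
	return 0;
}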

block/blk-sysfs.c
@@ -135,7 +135,26 @@
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_nomerges(q), page);
+}
 
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+				    size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	if (nm)
+		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+	else
+		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+	return ret;
+}
+
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -170,6 +189,12 @@
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -177,6 +202,7 @@
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nomerges_entry.attr,
 	NULL,
 };
 
block/elevator.c
@@ -488,6 +488,9 @@
 		}
 	}
 
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
include/linux/blkdev.h
@@ -408,6 +408,7 @@
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
 					   struct request_queue *q)
@@ -476,6 +477,7 @@
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
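For context on the block/elevator.c hunk above: the new check sits in
elv_merge(), after the cheap one-hit cache test and before the hash-based
backmerge lookup. The sketch below paraphrases the surrounding 2.6.26-era
code rather than quoting it verbatim; the elided tail is marked:

/* Paraphrased sketch of elv_merge() with this patch applied (not verbatim). */
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	int ret;

	/* One-hit cache: retry the last request we merged into. */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	/* New with this patch: the user asked for no merging, so stop
	 * before the costlier lookups below.
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/* ... as before: hash lookup for a potential backmerge, then the
	 * elevator's own merge hook ...
	 */
	return ELEVATOR_NO_MERGE;
}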