Commit 402a26f0c040077ed6f941eefac5a6971f0d5f40

Authored by Linus Torvalds

Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] block/elevator.c: remove unused exports
  [PATCH] splice: fix smaller sized splice reads
  [PATCH] Don't inherit ->splice_pipe across forks
  [patch] cleanup: use blk_queue_stopped
  [PATCH] Document online io scheduler switching

Showing 5 changed files

Documentation/block/switching-sched.txt
  1 +As of the Linux 2.6.10 kernel, it is now possible to change the
  2 +IO scheduler for a given block device on the fly (thus making it possible,
  3 +for instance, to set the CFQ scheduler for the system default, but
  4 +set a specific device to use the anticipatory or noop schedulers - which
  5 +can improve that device's throughput).
  6 +
  7 +To set a specific scheduler, simply do this:
  8 +
  9 +echo SCHEDNAME > /sys/block/DEV/queue/scheduler
  10 +
  11 +where SCHEDNAME is the name of a defined IO scheduler, and DEV is the
  12 +device name (hda, hdb, sda, or whatever you happen to have).
  13 +
  14 +The list of defined schedulers can be found by simply doing
  15 +a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
  16 +will be displayed, with the currently selected scheduler in brackets:
  17 +
  18 +# cat /sys/block/hda/queue/scheduler
  19 +noop anticipatory deadline [cfq]
  20 +# echo anticipatory > /sys/block/hda/queue/scheduler
  21 +# cat /sys/block/hda/queue/scheduler
  22 +noop [anticipatory] deadline cfq
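
The document above drives the switch from a shell prompt; the same sysfs attribute can also be written from a program. Below is a minimal C sketch that selects the "anticipatory" scheduler for hda, reusing the device and scheduler names from the documentation's examples; it is illustrative only and assumes the sysfs path shown above.

/* Illustrative only: write a scheduler name to the sysfs attribute
 * described above.  Device (hda) and scheduler (anticipatory) are
 * taken from the documentation's examples. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/block/hda/queue/scheduler";
        const char *sched = "anticipatory";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, sched, strlen(sched)) < 0) {
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}

Running it has the same effect as the echo command above; a subsequent cat of the attribute should show [anticipatory] as the selected scheduler.
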
block/elevator.c
... ... @@ -895,11 +895,9 @@
895 895 EXPORT_SYMBOL(elv_dispatch_sort);
896 896 EXPORT_SYMBOL(elv_add_request);
897 897 EXPORT_SYMBOL(__elv_add_request);
898   -EXPORT_SYMBOL(elv_requeue_request);
899 898 EXPORT_SYMBOL(elv_next_request);
900 899 EXPORT_SYMBOL(elv_dequeue_request);
901 900 EXPORT_SYMBOL(elv_queue_empty);
902   -EXPORT_SYMBOL(elv_completed_request);
903 901 EXPORT_SYMBOL(elevator_exit);
904 902 EXPORT_SYMBOL(elevator_init);
block/ll_rw_blk.c
... ... @@ -1554,7 +1554,7 @@
1554 1554 * don't plug a stopped queue, it must be paired with blk_start_queue()
1555 1555 * which will restart the queueing
1556 1556 */
1557   - if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
  1557 + if (blk_queue_stopped(q))
1558 1558 return;
1559 1559  
1560 1560 if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
... ... @@ -1587,7 +1587,7 @@
1587 1587 */
1588 1588 void __generic_unplug_device(request_queue_t *q)
1589 1589 {
1590   - if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
  1590 + if (unlikely(blk_queue_stopped(q)))
1591 1591 return;
1592 1592  
1593 1593 if (!blk_remove_plug(q))
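
The two hunks above replace an open-coded test_bit() on QUEUE_FLAG_STOPPED with the blk_queue_stopped() helper, so the plug paths check queue state the same way as the rest of the block layer. As a rough sketch, modelled on the queue-flag accessors in include/linux/blkdev.h of this era (the exact definition may differ in detail), the helper is just a wrapper around that bit test:

/* Sketch of the helper used above; modelled on the queue-flag
 * accessors in include/linux/blkdev.h, exact form may differ. */
#define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)

Behaviour is unchanged; the point of the cleanup is to keep callers from poking at queue_flags directly.
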
fs/splice.c
... ... @@ -275,6 +275,15 @@
275 275 error = 0;
276 276 bytes = 0;
277 277 for (i = 0; i < nr_pages; i++, index++) {
  278 + unsigned int this_len;
  279 +
  280 + if (!len)
  281 + break;
  282 +
  283 + /*
  284 + * this_len is the max we'll use from this page
  285 + */
  286 + this_len = min(len, PAGE_CACHE_SIZE - loff);
278 287 find_page:
279 288 /*
280 289 * lookup the page for this index
281 290  
... ... @@ -366,11 +375,13 @@
366 375 * force quit after adding this page
367 376 */
368 377 nr_pages = i;
  378 + this_len = min(this_len, loff);
369 379 }
370 380 }
371 381 fill_it:
372 382 pages[i] = page;
373   - bytes += PAGE_CACHE_SIZE - loff;
  383 + bytes += this_len;
  384 + len -= this_len;
374 385 loff = 0;
375 386 }
376 387  
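
The splice read fix above stops the loop from charging a full PAGE_CACHE_SIZE for every page: this_len starts as the smaller of the remaining request length and what the page holds past loff, is clamped again when the loop decides this is the last page it will add, and bytes/len then advance by the amount actually used. A standalone sketch of that accounting, with illustrative names rather than the real fs/splice.c ones:

/* Standalone sketch of the per-page accounting introduced above.
 * Names (PAGE_SIZE_SKETCH, account_splice_read) are illustrative,
 * not taken from fs/splice.c. */
#include <stddef.h>

#define PAGE_SIZE_SKETCH 4096UL

static size_t min_sz(size_t a, size_t b)
{
        return a < b ? a : b;
}

/* Bytes a read of 'len' starting at page offset 'loff' accounts for
 * across at most 'nr_pages' pages. */
static size_t account_splice_read(size_t len, size_t loff, unsigned int nr_pages)
{
        size_t bytes = 0;
        unsigned int i;

        for (i = 0; i < nr_pages; i++) {
                size_t this_len;

                if (!len)
                        break;

                /* max we'll use from this page */
                this_len = min_sz(len, PAGE_SIZE_SKETCH - loff);

                bytes += this_len;
                len -= this_len;
                loff = 0;       /* later pages start at offset 0 */
        }

        return bytes;
}

For example, a 5000-byte request starting at offset 1000 into the first page now accounts 3096 + 1904 = 5000 bytes across two pages, where the old per-page accounting of PAGE_CACHE_SIZE - loff would have reported 3096 + 4096 = 7192.
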
kernel/fork.c
... ... @@ -180,6 +180,7 @@
180 180 atomic_set(&tsk->usage,2);
181 181 atomic_set(&tsk->fs_excl, 0);
182 182 tsk->btrace_seq = 0;
  183 + tsk->splice_pipe = NULL;
183 184 return tsk;
184 185 }
185 186
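
The fork.c hunk matters because dup_task_struct() copies the parent's task_struct wholesale, so without the added line the child would inherit the parent's splice_pipe pointer and both tasks could later free the same cached pipe. A minimal sketch of the pattern, with hypothetical types (task_sketch, splice_cache) rather than the real kernel structures:

/* Minimal sketch, hypothetical types: after a wholesale struct copy,
 * a per-task cached resource must not be shared, so the copy's
 * pointer is cleared and the child allocates its own on first use. */
#include <stdlib.h>
#include <string.h>

struct task_sketch {
        int pid;
        void *splice_cache;     /* lazily allocated, owned per task */
};

static struct task_sketch *dup_task_sketch(const struct task_sketch *orig)
{
        struct task_sketch *tsk = malloc(sizeof(*tsk));

        if (!tsk)
                return NULL;

        memcpy(tsk, orig, sizeof(*tsk));        /* copies splice_cache too */
        tsk->splice_cache = NULL;               /* don't inherit the parent's cache */

        return tsk;
}
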