Commit 045a2529a3513faed2d45bd82f9013b124309d94

Authored by Wu Fengguang
Committed by Linus Torvalds
1 parent dc566127dd

readahead: move the random read case to bottom

Split all readahead cases, and move the random one to the bottom.

No behavior changes.

This prepares for the introduction of context readahead, and makes it
easy to insert accounting/tracing points for each case.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Vladislav Bolkhovitin <vst@vlnb.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 25 additions and 21 deletions
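
Before the diff itself, a self-contained model of the new case ordering may
help. The program below is userspace C, not kernel code: classify() mirrors
the test order the patch establishes, hit_marker stands in for
hit_readahead_marker, and prev_pos is tracked in page units (the kernel
derives it by shifting ra->prev_pos right by PAGE_CACHE_SHIFT). The state
values in main() are made up for illustration.

#include <stdio.h>

/* Userspace stand-in for struct file_ra_state; all units are pages. */
struct ra_state {
	unsigned long start;		/* first page of current window */
	unsigned long size;		/* window size in pages */
	unsigned long async_size;	/* async readahead trigger distance */
	unsigned long prev_pos;		/* previous read position, in pages */
};

/* Mirrors the post-patch test order in ondemand_readahead(). */
static const char *classify(const struct ra_state *ra, int hit_marker,
			    unsigned long offset, unsigned long req_size,
			    unsigned long max)
{
	if (!offset)
		return "start of file -> initial readahead";
	if (offset == ra->start + ra->size - ra->async_size ||
	    offset == ra->start + ra->size)
		return "expected offset -> ramp up, push window forward";
	if (hit_marker)
		return "marked page, no state -> interleaved read";
	if (req_size > max)
		return "oversize read -> initial readahead";
	if (offset - ra->prev_pos <= 1UL)
		return "sequential cache miss -> initial readahead";
	return "random read -> read as-is, leave readahead state alone";
}

int main(void)
{
	struct ra_state ra = { .start = 100, .size = 32,
			       .async_size = 16, .prev_pos = 99 };
	unsigned long max = 128;

	printf("%s\n", classify(&ra, 0,   0,   4, max));  /* first page */
	printf("%s\n", classify(&ra, 0, 116,   4, max));  /* async marker */
	printf("%s\n", classify(&ra, 0, 300, 200, max));  /* huge request */
	printf("%s\n", classify(&ra, 0, 100,   4, max));  /* re-read near prev */
	printf("%s\n", classify(&ra, 0, 500,   4, max));  /* seek far away */
	return 0;
}

With the cases split into separate tests like this, an accounting counter or
tracepoint can be attached to each branch individually, which is what the
changelog says this prepares for.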

@@ -339,34 +339,26 @@
 			       unsigned long req_size)
 {
 	unsigned long max = max_sane_readahead(ra->ra_pages);
-	pgoff_t prev_offset;
-	int sequential;
 
 	/*
+	 * start of file
+	 */
+	if (!offset)
+		goto initial_readahead;
+
+	/*
 	 * It's the expected callback offset, assume sequential access.
 	 * Ramp up sizes, and push forward the readahead window.
 	 */
-	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-			offset == (ra->start + ra->size))) {
+	if ((offset == (ra->start + ra->size - ra->async_size) ||
+	     offset == (ra->start + ra->size))) {
 		ra->start += ra->size;
 		ra->size = get_next_ra_size(ra, max);
 		ra->async_size = ra->size;
 		goto readit;
 	}
 
-	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-	sequential = offset - prev_offset <= 1UL || req_size > max;
-
 	/*
-	 * Standalone, small read.
-	 * Read as is, and do not pollute the readahead state.
-	 */
-	if (!hit_readahead_marker && !sequential) {
-		return __do_page_cache_readahead(mapping, filp,
-						 offset, req_size, 0);
-	}
-
-	/*
 	 * Hit a marked page without valid readahead state.
 	 * E.g. interleaved reads.
 	 * Query the pagecache for async_size, which normally equals to
@@ -391,12 +383,24 @@
 	}
 
 	/*
-	 * It may be one of
-	 *   - first read on start of file
-	 *   - sequential cache miss
-	 *   - oversize random read
-	 * Start readahead for it.
+	 * oversize read
 	 */
+	if (req_size > max)
+		goto initial_readahead;
+
+	/*
+	 * sequential cache miss
+	 */
+	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+		goto initial_readahead;
+
+	/*
+	 * standalone, small random read
+	 * Read as is, and do not pollute the readahead state.
+	 */
+	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
 	ra->start = offset;
 	ra->size = get_init_ra_size(req_size, max);
 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
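
The two goto targets carry the window bookkeeping: initial_readahead builds a
fresh window at offset, while the expected-offset case pushes the window
forward and grows it. The simulation below makes that ramp-up visible; the
initial size of 16 pages and the next_ra_size() doubling rule are illustrative
stand-ins, since the real get_init_ra_size()/get_next_ra_size() are not shown
in this diff. All sizes are in pages.

#include <stdio.h>

struct ra_state { unsigned long start, size, async_size; };

/* Illustrative stand-in for get_next_ra_size(): double, capped at max. */
static unsigned long next_ra_size(unsigned long cur, unsigned long max)
{
	unsigned long next = cur * 2;
	return next < max ? next : max;
}

int main(void)
{
	struct ra_state ra = { 0 };
	unsigned long max = 128;	/* max_sane_readahead() stand-in */
	unsigned long req_size = 4;

	/* initial_readahead: first window for a read at offset 0 */
	ra.start = 0;
	ra.size = 16;			/* get_init_ra_size() stand-in */
	ra.async_size = ra.size > req_size ? ra.size - req_size : ra.size;
	printf("init:  start=%lu size=%lu async=%lu\n",
	       ra.start, ra.size, ra.async_size);

	/* Expected-offset hits: push the window forward and ramp it up. */
	for (int i = 0; i < 4; i++) {
		ra.start += ra.size;
		ra.size = next_ra_size(ra.size, max);
		ra.async_size = ra.size;
		printf("ramp%d: start=%lu size=%lu async=%lu\n",
		       i, ra.start, ra.size, ra.async_size);
	}
	return 0;
}

Under this stand-in rule the window doubles on every expected-offset hit until
it pins at max, which is the general shape of how a sequential stream quickly
reaches the full readahead size.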