Commit f9acc8c7b35a100f3a9e0e6977f7807b0169f9a5

Authored by Fengguang Wu
Committed by Linus Torvalds
1 parent cf914a7d65

readahead: sanify file_ra_state names

Rename some file_ra_state variables and remove some accessors.

It results in much simpler code.
Kudos to Rusty!

Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 31 additions and 98 deletions — side-by-side diff

... ... @@ -695,16 +695,12 @@
695 695  
696 696 /*
697 697 * Track a single file's readahead state
698   - *
699   - * ================#============|==================#==================|
700   - *                 ^            ^                  ^                  ^
701   - *  file_ra_state.la_index  .ra_index   .lookahead_index  .readahead_index
702 698 */
703 699 struct file_ra_state {
704   - pgoff_t la_index; /* enqueue time */
705   - pgoff_t ra_index; /* begin offset */
706   - pgoff_t lookahead_index; /* time to do next readahead */
707   - pgoff_t readahead_index; /* end offset */
  700 + pgoff_t start; /* where readahead started */
  701 + unsigned long size; /* # of readahead pages */
  702 + unsigned long async_size; /* do asynchronous readahead when
  703 + there are only # of pages ahead */
708 704  
709 705 unsigned long ra_pages; /* Maximum readahead window */
710 706 unsigned long mmap_hit; /* Cache hit stat for mmap accesses */
711 707  
712 708  
... ... @@ -714,58 +710,13 @@
714 710 };
715 711  
716 712 /*
717   - * Measuring read-ahead sizes.
718   - *
719   - *                 |----------- readahead size ------------>|
720   - * ===#============|==================#=====================|
721   - *    |------- invoke interval ------>|-- lookahead size -->|
722   - */
723   -static inline unsigned long ra_readahead_size(struct file_ra_state *ra)
724   -{
725   - return ra->readahead_index - ra->ra_index;
726   -}
727   -
728   -static inline unsigned long ra_lookahead_size(struct file_ra_state *ra)
729   -{
730   - return ra->readahead_index - ra->lookahead_index;
731   -}
732   -
733   -static inline unsigned long ra_invoke_interval(struct file_ra_state *ra)
734   -{
735   - return ra->lookahead_index - ra->la_index;
736   -}
737   -
738   -/*
739 713 * Check if @index falls in the readahead windows.
740 714 */
741 715 static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
742 716 {
743   - return (index >= ra->la_index &&
744   - index < ra->readahead_index);
  717 + return (index >= ra->start &&
  718 + index < ra->start + ra->size);
745 719 }
746   -
747   -/*
748   - * Where is the old read-ahead and look-ahead?
749   - */
750   -static inline void ra_set_index(struct file_ra_state *ra,
751   - pgoff_t la_index, pgoff_t ra_index)
752   -{
753   - ra->la_index = la_index;
754   - ra->ra_index = ra_index;
755   -}
756   -
757   -/*
758   - * Where is the new read-ahead and look-ahead?
759   - */
760   -static inline void ra_set_size(struct file_ra_state *ra,
761   - unsigned long ra_size, unsigned long la_size)
762   -{
763   - ra->readahead_index = ra->ra_index + ra_size;
764   - ra->lookahead_index = ra->ra_index + ra_size - la_size;
765   -}
766   -
767   -unsigned long ra_submit(struct file_ra_state *ra,
768   - struct address_space *mapping, struct file *filp);
769 720  
770 721 struct file {
771 722 /*
... ... @@ -253,21 +253,16 @@
253 253 /*
254 254 * Submit IO for the read-ahead request in file_ra_state.
255 255 */
256   -unsigned long ra_submit(struct file_ra_state *ra,
  256 +static unsigned long ra_submit(struct file_ra_state *ra,
257 257 struct address_space *mapping, struct file *filp)
258 258 {
259   - unsigned long ra_size;
260   - unsigned long la_size;
261 259 int actual;
262 260  
263   - ra_size = ra_readahead_size(ra);
264   - la_size = ra_lookahead_size(ra);
265 261 actual = __do_page_cache_readahead(mapping, filp,
266   - ra->ra_index, ra_size, la_size);
  262 + ra->start, ra->size, ra->async_size);
267 263  
268 264 return actual;
269 265 }
270   -EXPORT_SYMBOL_GPL(ra_submit);
271 266  
272 267 /*
273 268 * Set the initial window size, round to next power of 2 and square
... ... @@ -296,7 +291,7 @@
296 291 static unsigned long get_next_ra_size(struct file_ra_state *ra,
297 292 unsigned long max)
298 293 {
299   - unsigned long cur = ra->readahead_index - ra->ra_index;
  294 + unsigned long cur = ra->size;
300 295 unsigned long newsize;
301 296  
302 297 if (cur < max / 16)
303 298  
304 299  
305 300  
... ... @@ -313,28 +308,21 @@
313 308 * The fields in struct file_ra_state represent the most-recently-executed
314 309 * readahead attempt:
315 310 *
316   - *             |-------- last readahead window -------->|
317   - *       |-- application walking here -->|
318   - * ======#============|==================#=====================|
319   - *       ^la_index    ^ra_index          ^lookahead_index     ^readahead_index
  311 + *                        |<----- async_size ---------|
  312 + *     |------------------- size -------------------->|
  313 + *     |==================#===========================|
  314 + *           ^start       ^page marked with PG_readahead
320 315 *
321   - * [ra_index, readahead_index) represents the last readahead window.
322   - *
323   - * [la_index, lookahead_index] is where the application would be walking(in
324   - * the common case of cache-cold sequential reads): the last window was
325   - * established when the application was at la_index, and the next window will
326   - * be bring in when the application reaches lookahead_index.
327   - *
328 316 * To overlap application thinking time and disk I/O time, we do
329 317 * `readahead pipelining': Do not wait until the application consumed all
330 318 * readahead pages and stalled on the missing page at readahead_index;
331   - * Instead, submit an asynchronous readahead I/O as early as the application
332   - * reads on the page at lookahead_index. Normally lookahead_index will be
333   - * equal to ra_index, for maximum pipelining.
  319 + * Instead, submit an asynchronous readahead I/O as soon as there are
  320 + * only async_size pages left in the readahead window. Normally async_size
  321 + * will be equal to size, for maximum pipelining.
334 322 *
335 323 * In interleaved sequential reads, concurrent streams on the same fd can
336 324 * be invalidating each other's readahead state. So we flag the new readahead
337   - * page at lookahead_index with PG_readahead, and use it as readahead
  325 + * page at (start+size-async_size) with PG_readahead, and use it as readahead
338 326 * indicator. The flag won't be set on already cached pages, to avoid the
339 327 * readahead-for-nothing fuss, saving pointless page cache lookups.
340 328 *
341 329  
342 330  
... ... @@ -363,24 +351,21 @@
363 351 unsigned long req_size)
364 352 {
365 353 unsigned long max; /* max readahead pages */
366   - pgoff_t ra_index; /* readahead index */
367   - unsigned long ra_size; /* readahead size */
368   - unsigned long la_size; /* lookahead size */
369 354 int sequential;
370 355  
371 356 max = ra->ra_pages;
372 357 sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
373 358  
374 359 /*
375   - * Lookahead/readahead hit, assume sequential access.
  360 + * It's the expected callback offset, assume sequential access.
376 361 * Ramp up sizes, and push forward the readahead window.
377 362 */
378   - if (offset && (offset == ra->lookahead_index ||
379   - offset == ra->readahead_index)) {
380   - ra_index = ra->readahead_index;
381   - ra_size = get_next_ra_size(ra, max);
382   - la_size = ra_size;
383   - goto fill_ra;
  363 + if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
  364 + offset == (ra->start + ra->size))) {
  365 + ra->start += ra->size;
  366 + ra->size = get_next_ra_size(ra, max);
  367 + ra->async_size = ra->size;
  368 + goto readit;
384 369 }
385 370  
386 371 /*
387 372  
388 373  
389 374  
... ... @@ -399,24 +384,21 @@
399 384 * - oversize random read
400 385 * Start readahead for it.
401 386 */
402   - ra_index = offset;
403   - ra_size = get_init_ra_size(req_size, max);
404   - la_size = ra_size > req_size ? ra_size - req_size : ra_size;
  387 + ra->start = offset;
  388 + ra->size = get_init_ra_size(req_size, max);
  389 + ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
405 390  
406 391 /*
407   - * Hit on a lookahead page without valid readahead state.
  392 + * Hit on a marked page without valid readahead state.
408 393 * E.g. interleaved reads.
409 394 * Not knowing its readahead pos/size, bet on the minimal possible one.
410 395 */
411 396 if (hit_readahead_marker) {
412   - ra_index++;
413   - ra_size = min(4 * ra_size, max);
  397 + ra->start++;
  398 + ra->size = get_next_ra_size(ra, max);
414 399 }
415 400  
416   -fill_ra:
417   - ra_set_index(ra, offset, ra_index);
418   - ra_set_size(ra, ra_size, la_size);
419   -
  401 +readit:
420 402 return ra_submit(ra, mapping, filp);
421 403 }
422 404