/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
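
/*
 * Example usage (an illustrative sketch, not code from this file; the
 * myfs_* names are hypothetical): a filesystem's ->readpages() can hand
 * each page to its single-page read routine via the filler callback.
 *
 *	static int myfs_filler(void *data, struct page *page)
 *	{
 *		struct file *filp = data;
 *
 *		return myfs_readpage(filp, page);
 *	}
 *
 *	static int myfs_readpages(struct file *filp,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, myfs_filler, filp);
 *	}
 */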

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page))
			continue;

		page = page_cache_alloc_readahead(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0)
			return err;

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}

#define MAX_READAHEAD   ((512*4096)/PAGE_CACHE_SIZE)
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, MAX_READAHEAD);
}
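
/*
 * Arithmetic note (an editor's illustration, assuming 4KiB pages): the cap
 * works out to (512 * 4096) / 4096 = 512 pages, i.e. 2MiB of I/O - the same
 * 2MiB granularity that force_page_cache_readahead() above chunks by.
 */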

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - x 4 for small sizes, x 2 for medium, clamped to max
 * for large.  For a 128k (32 page) max ra:
 * 1-2 page reads = 16k initial, 3-8 pages = 32k-64k, larger reads = 128k.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
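
/*
 * Worked example (an editor's illustration, assuming 4KiB pages and a
 * 32-page / 128KiB max window): a 4-page (16KiB) first read rounds up to 4,
 * falls in the medium band (4 <= 32/4), and is doubled to an 8-page (32KiB)
 * initial window; a 16-page read exceeds 32/4 and starts at the full
 * 32-page window straight away.
 */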

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
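
/*
 * Worked example (an editor's illustration, max = 32 pages): a window of
 * 8 pages is not below 32/16 = 2, so it is doubled to 16, and then to 32 on
 * the next ramp-up, where min(newsize, max) pins it; only very small windows
 * (cur < 2 here) grow fourfold.
 */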

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
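
/*
 * Worked example of the pipelining above (an editor's illustration,
 * max = 32 pages): a 4-page read at page 0 gets an initial window start=0,
 * size=8, async_size=4, so page 4 is marked PG_readahead.  When the
 * application reaches page 4, page_cache_async_readahead() fires: the
 * window advances to start=8 with size = async_size = get_next_ra_size()
 * = 16, overlapping the I/O for pages 8-23 with the consumption of
 * pages 4-7.
 */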

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_hole(mapping, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, ra, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
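
/*
 * Worked example (an editor's illustration): if pages 90-99 are already
 * cached and a 4-page read lands at offset 100, count_history_pages() finds
 * the hole below page 90 and reports 10 history pages.  Since 10 > 4, a
 * sequential stream is assumed: the window becomes start=100,
 * size = min(10 + 4, max), with async_size=1 to re-arm the marker early.
 */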

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);
	pgoff_t prev_offset;

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_hole(mapping, offset + 1, max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}
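
/*
 * Summary of the cases above (an editor's note): sequential hits extend and
 * ramp the current window; a marker hit without matching state rebuilds the
 * window from the next pagecache hole; reads at offset 0, oversize reads
 * and sequential cache misses restart with an initial window; context
 * readahead rescues streams whose state was lost; anything else is served
 * as a plain, stateless read.
 */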

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
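
/*
 * Call-site sketch (an illustrative example modelled on a buffered read
 * path; not code from this file): on a cache miss the reader kicks off
 * synchronous readahead before sleeping on the page.
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */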

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
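
/*
 * Call-site sketch (an illustrative example; not code from this file): when
 * the reader consumes a page carrying the PG_readahead marker, it nudges
 * the next window along before it can block.
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, &filp->f_ra, filp,
 *					   page, index, last_index - index);
 */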

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops)
		return -EINVAL;

	return force_page_cache_readahead(mapping, filp, index, nr);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_READ) {
			struct address_space *mapping = f.file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, f.file, start, len);
		}
		fdput(f);
	}
	return ret;
}
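
/*
 * Worked example of the page rounding above (an editor's illustration,
 * assuming 4KiB pages): readahead(fd, 5000, 10000) covers bytes 5000-14999,
 * so start = 5000 >> 12 = 1, end = 14999 >> 12 = 3, and len = 3 pages
 * (pages 1, 2 and 3) are passed to do_readahead().
 */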