/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

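/*
 * For illustration, a minimal sketch of how a filesystem might hook
 * ->invalidatepage ("foofs" and its helper are hypothetical; journalling
 * filesystems such as ext3 use this hook to strip journalled buffers
 * before handing the page to the common buffer code):
 *
 *	static void foofs_invalidatepage(struct page *page, unsigned long offset)
 *	{
 *		foofs_forget_private_state(page, offset);
 *		block_invalidatepage(page, offset);
 *	}
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.invalidatepage	= foofs_invalidatepage,
 *	};
 */
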
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

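/*
 * A sketch of the fs/buffer.c pattern mentioned above (roughly what
 * try_to_free_buffers() does in this era once every buffer on the page
 * has been dropped):
 *
 *	if (drop_buffers(page, &buffers_to_free))
 *		cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *
 * The cancelled size is a full page, so the task's cancelled-write
 * accounting stays balanced with what was originally charged.
 */
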
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);
	cancel_dirty_page(page, PAGE_CACHE_SIZE);
	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

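/*
 * Illustrative call pattern (the caller is hypothetical): because
 * invalidate_inode_page() requires a locked page and refuses dirty,
 * mapped or in-writeback pages, an opportunistic user looks like:
 *
 *	if (trylock_page(page)) {
 *		invalidate_inode_page(page);
 *		unlock_page(page);
 *	}
 *
 * A zero return simply means "could not be dropped right now"; it is
 * not an error.
 */
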
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

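/*
 * Note the @lend convention enforced by the BUG_ON above: the range is
 * inclusive and must end one byte short of a page boundary.  A sketch of
 * dropping pages 'first' through 'last' of a mapping (illustrative only;
 * 'first' and 'last' are page indices):
 *
 *	truncate_inode_pages_range(mapping,
 *			(loff_t)first << PAGE_CACHE_SHIFT,
 *			((loff_t)(last + 1) << PAGE_CACHE_SHIFT) - 1);
 *
 * lstart is page aligned so no partial page is zeroed, and lend's low
 * bits are all ones, so the alignment check passes.
 */
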
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

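/*
 * The classic caller is inode teardown.  A sketch of what a filesystem's
 * delete-inode path typically does in this era ("foofs" and its block
 * freeing helper are hypothetical):
 *
 *	static void foofs_delete_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		foofs_free_blocks(inode);
 *		clear_inode(inode);
 *	}
 *
 * Passing lstart == 0 drops every page, and lend == (loff_t)-1 keeps the
 * BUG_ON in truncate_inode_pages_range() happy since its low bits are
 * all ones.
 */
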
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Returns the number of pages which were successfully invalidated.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

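/*
 * This is what drop_caches-style callers want: a best-effort,
 * non-destructive sweep of clean pagecache.  Roughly what
 * fs/drop_caches.c does per inode in this era:
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * With pgoff_t being unsigned, an @end of -1 means "to the end of the
 * file".  Dirty or mapped pages are silently skipped, so no data is
 * lost.
 */
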
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;
	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;
	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

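/*
 * ->launder_page gives the filesystem one last chance to write a dirty
 * page back before invalidate_inode_pages2() gives up on it.  A sketch
 * of an implementation (hypothetical "foofs" and helper; NFS is the
 * notable in-tree user of this hook):
 *
 *	static int foofs_launder_page(struct page *page)
 *	{
 *		return foofs_writepage_sync(page);
 *	}
 *
 * The page comes in locked and dirty; returning 0 means it is now clean
 * and may be invalidated.
 */
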
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

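/*
 * The stronger pages2 flavour exists for coherency-critical callers such
 * as direct IO and NFS cache revalidation, where stale pagecache must
 * not survive.  Roughly the pattern used around a direct write ('pos'
 * and 'count' are byte offsets, illustrative only):
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + count - 1) >> PAGE_CACHE_SHIFT);
 *
 * A -EBUSY result tells the caller that some pages were left behind and
 * the cache may be momentarily inconsistent with the disk.
 */
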
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (e.g. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	if (new < old) {
		struct address_space *mapping = inode->i_mapping;

		/*
		 * unmap_mapping_range is called twice, first simply for
		 * efficiency so that truncate_inode_pages does fewer
		 * single-page unmaps.  However after this first call, and
		 * before truncate_inode_pages finishes, it is possible for
		 * private pages to be COWed, which remain after
		 * truncate_inode_pages finishes, hence the second
		 * unmap_mapping_range call must be made for correctness.
		 */
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
		truncate_inode_pages(mapping, new);
		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	}
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	loff_t oldsize;
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;

	oldsize = inode->i_size;
	i_size_write(inode, offset);
	truncate_pagecache(inode, oldsize, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	return error;
}
EXPORT_SYMBOL(vmtruncate);

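/*
 * Putting it together: vmtruncate() orders the i_size update, the
 * pagecache purge and the fs-specific ->truncate callback, and a simple
 * filesystem's ->setattr funnels size changes through it.  A sketch
 * ("foofs" is hypothetical):
 *
 *	static int foofs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *
 *		if (error)
 *			return error;
 *		if (attr->ia_valid & ATTR_SIZE) {
 *			error = vmtruncate(inode, attr->ia_size);
 *			if (error)
 *				return error;
 *			attr->ia_valid &= ~ATTR_SIZE;
 *		}
 *		return inode_setattr(inode, attr);
 *	}
 *
 * vmtruncate() writes the new i_size first, so concurrent writeback
 * sees the shrunken file before ->truncate frees the blocks.
 */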