/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
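
/*
 * Illustrative sketch, not part of the original file: a buffer-head based
 * filesystem can point its ->invalidatepage hook at block_invalidatepage,
 * which is also the fallback chosen above when the hook is NULL under
 * CONFIG_BLOCK. The example_aops table and the read/write hooks named in
 * it are hypothetical.
 */
#if 0
static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,	/* hypothetical */
	.writepage	= example_writepage,	/* hypothetical */
	.invalidatepage	= block_invalidatepage,
};
#endif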
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	cleancache_flush_page(page->mapping, page);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}
/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);
	cancel_dirty_page(page, PAGE_CACHE_SIZE);
	clear_page_mlock(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;
	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;
	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

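/*
 * Illustrative sketch, not part of the original file: filesystems opt in
 * to hwpoison recovery by wiring ->error_remove_page to the generic
 * helper above; the memory-failure code then invokes it through the
 * address_space_operations table. The ops table name is hypothetical.
 */
#if 0
static const struct address_space_operations example_error_aops = {
	.error_remove_page	= generic_error_remove_page,
};
#endif
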
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is
 * not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t index;
	pgoff_t end;
	int i;
	cleancache_flush_inode(mapping);
	if (mapping->nrpages == 0)
		return;
	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_flush_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
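
/*
 * Illustrative sketch, not part of the original file: dropping one
 * page-aligned window of pagecache, as a hole-punching path might. Note
 * that lend is inclusive and, per the BUG_ON above, must end exactly one
 * byte before a page boundary. The function name is hypothetical.
 */
#if 0
static void example_punch_window(struct address_space *mapping,
				 loff_t start, loff_t bytes)
{
	/* start and bytes are assumed PAGE_CACHE_SIZE aligned */
	truncate_inode_pages_range(mapping, start, start + bytes - 1);
}
#endif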

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
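
/*
 * Illustrative sketch, not part of the original file: the classic caller
 * is inode eviction, where the whole mapping is torn down before the
 * inode is freed. The function name is hypothetical; end_writeback() is
 * assumed to be this kernel generation's teardown helper.
 */
#if 0
static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
}
#endif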
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;
	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */
	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
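
/*
 * Illustrative sketch, not part of the original file: converting a byte
 * range into page offsets and dropping the clean cached pages in it,
 * roughly the shape of an fadvise(POSIX_FADV_DONTNEED) style caller. The
 * function name is hypothetical.
 */
#if 0
static void example_drop_clean_pages(struct address_space *mapping,
				     loff_t offset, loff_t len)
{
	pgoff_t start = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (offset + len - 1) >> PAGE_CACHE_SHIFT;

	if (len && end >= start)
		invalidate_mapping_pages(mapping, start, end);
}
#endif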

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;
	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;
	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;
	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}
static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_flush_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_flush_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
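
/*
 * Illustrative sketch, not part of the original file: a network
 * filesystem that detects a server-side change might flush dirty data
 * and then force the cache coherent with invalidate_inode_pages2(). The
 * function name is hypothetical.
 */
#if 0
static int example_revalidate_cache(struct inode *inode)
{
	int ret = filemap_write_and_wait(inode->i_mapping);

	if (ret)
		return ret;
	return invalidate_inode_pages2(inode->i_mapping);
}
#endif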

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @oldsize: old file size
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t oldsize, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize;

	oldsize = inode->i_size;
	i_size_write(inode, newsize);

	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);

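/*
 * Illustrative sketch, not part of the original file: a minimal setattr
 * implementation driving truncate_setsize() as described in the
 * kernel-doc above, then releasing blocks in the filesystem. All
 * example_* names are hypothetical.
 */
#if 0
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
		truncate_setsize(inode, attr->ia_size);
		example_truncate_blocks(inode, attr->ia_size); /* hypothetical */
	}
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
#endif
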
/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @newsize: file offset to start truncating
 *
 * This function is deprecated and truncate_setsize or truncate_pagecache
 * should be used instead, together with filesystem specific block truncation.
 */
int vmtruncate(struct inode *inode, loff_t newsize)
{
	int error;
	error = inode_newsize_ok(inode, newsize);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);

int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(lstart, PAGE_SIZE);
	loff_t holelen = 1 + lend - holebegin;

	/*
	 * If the underlying filesystem is not going to provide
	 * a way to truncate a range of blocks (punch a hole) -
	 * we should return failure right now.
	 */
	if (!inode->i_op->truncate_range)
		return -ENOSYS;

	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);
	unmap_mapping_range(mapping, holebegin, holelen, 1);
	inode->i_op->truncate_range(inode, lstart, lend);
	/* unmap again to remove racily COWed private pages */
	unmap_mapping_range(mapping, holebegin, holelen, 1);
	mutex_unlock(&inode->i_mutex);

	return 0;
}
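
/*
 * Illustrative sketch, not part of the original file: on this kernel
 * generation vmtruncate_range() is assumed to back hole punching via
 * madvise(MADV_REMOVE); a caller hands it the first and last byte of
 * the hole. The function name is hypothetical.
 */
#if 0
static int example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	return vmtruncate_range(inode, offset, offset + len - 1);
}
#endif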