mm/truncate.c
  /*
   * mm/truncate.c - code for taking down pages from address_spaces
   *
   * Copyright (C) 2002, Linus Torvalds
   *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
  	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
  		goto unlock;
  	if (*slot != entry)
  		goto unlock;
  	radix_tree_replace_slot(slot, NULL);
  	mapping->nrshadows--;
  	if (!node)
  		goto unlock;
  	workingset_node_shadows_dec(node);
  	/*
  	 * Don't track node without shadow entries.
  	 *
  	 * Avoid acquiring the list_lru lock if already untracked.
  	 * The list_empty() test is safe as node->private_list is
  	 * protected by mapping->tree_lock.
  	 */
  	if (!workingset_node_shadows(node) &&
  	    !list_empty(&node->private_list))
  		list_lru_del(&workingset_shadow_nodes, &node->private_list);
  	__radix_tree_delete_node(&mapping->page_tree, node);
  unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
  }
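
/*
 * Illustrative sketch, not part of this file: a filesystem with
 * per-page private data would normally reach do_invalidatepage()
 * through its address_space_operations; the "myfs" names below are
 * hypothetical.
 *
 *	static void myfs_invalidatepage(struct page *page,
 *					unsigned int offset,
 *					unsigned int length)
 *	{
 *		myfs_drop_page_state(page, offset, length);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidatepage	= myfs_invalidatepage,
 *	};
 */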

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
   * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
  EXPORT_SYMBOL(generic_error_remove_page);
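
/*
 * Illustrative sketch, not from this file: filesystems typically opt
 * in to this helper by pointing ->error_remove_page at it from their
 * address_space_operations ("myfs" is hypothetical):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */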
  
  /*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

  /**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to properly handle cases where
 * lend + 1 is not page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}
			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}
			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
  EXPORT_SYMBOL(truncate_inode_pages_range);
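
/*
 * Worked example, assuming a 4096-byte PAGE_CACHE_SIZE: the call
 *
 *	truncate_inode_pages_range(inode->i_mapping, 512, 12799);
 *
 * computes partial_start = 512 & 4095 = 512, partial_end =
 * 12800 & 4095 = 512, start = 1 and end = 12800 >> 12 = 3.  Pages 1
 * and 2 are removed outright, page 0 is zeroed from byte 512 upward,
 * and page 3 is zeroed in [0, 512) by the partial_end branch.
 */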

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

  /**
   * truncate_inode_pages_final - truncate *all* pages before inode dies
   * @mapping: mapping to truncate
   *
   * Called under (and serialized by) inode->i_mutex.
   *
   * Filesystems have to use this in the .evict_inode path to inform the
   * VM that this is the final truncate and the inode is going away.
   */
  void truncate_inode_pages_final(struct address_space *mapping)
  {
  	unsigned long nrshadows;
  	unsigned long nrpages;
  
  	/*
  	 * Page reclaim can not participate in regular inode lifetime
  	 * management (can't call iput()) and thus can race with the
  	 * inode teardown.  Tell it when the address space is exiting,
  	 * so that it does not install eviction information after the
  	 * final truncate has begun.
  	 */
  	mapping_set_exiting(mapping);
  
  	/*
  	 * When reclaim installs eviction entries, it increases
  	 * nrshadows first, then decreases nrpages.  Make sure we see
  	 * this in the right order or we might miss an entry.
  	 */
  	nrpages = mapping->nrpages;
  	smp_rmb();
  	nrshadows = mapping->nrshadows;
  
  	if (nrpages || nrshadows) {
  		/*
  		 * As truncation uses a lockless tree lookup, cycle
  		 * the tree lock to make sure any ongoing tree
  		 * modification that does not see AS_EXITING is
  		 * completed before starting the final truncate.
  		 */
  		spin_lock_irq(&mapping->tree_lock);
  		spin_unlock_irq(&mapping->tree_lock);
  
  		truncate_inode_pages(mapping, 0);
  	}
  }
  EXPORT_SYMBOL(truncate_inode_pages_final);
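
/*
 * Illustrative sketch, not from this file: a typical ->evict_inode
 * implementation calls this before dropping its private state ("myfs"
 * names are hypothetical):
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *		myfs_free_inode_private(inode);
 *	}
 */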
  
  /**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
   *
   * invalidate_mapping_pages() will not block on IO activity. It will not
   * invalidate pages which are dirty, locked, under writeback or mapped into
   * pagetables.
   */
  unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */
	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}
			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
  EXPORT_SYMBOL(invalidate_mapping_pages);
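
/*
 * Illustrative call, not from this file: dropping every clean, unused
 * page an inode has cached, as a POSIX_FADV_DONTNEED-style path might:
 *
 *	unsigned long nr;
 *
 *	nr = invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *
 * nr counts only the pages actually freed; dirty, locked, mapped and
 * writeback pages are silently skipped.
 */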

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;
	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;
	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;
	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
   */
  int invalidate_inode_pages2_range(struct address_space *mapping,
  				  pgoff_t start, pgoff_t end)
  {
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}
			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
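
/*
 * Illustrative call, not from this file: direct-IO style callers
 * convert a byte range [pos, pos + count) to page offsets first,
 * roughly as generic_file_direct_write() does:
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + count - 1) >> PAGE_CACHE_SHIFT);
 */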
  
  /**
   * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
  void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
  }
  EXPORT_SYMBOL(truncate_pagecache);
  
  /**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);
  }
  EXPORT_SYMBOL(truncate_setsize);
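
/*
 * Illustrative sketch, not from this file: a minimal ->setattr that
 * pairs truncate_setsize() with its own block truncation ("myfs" names
 * are hypothetical):
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *
 *		if (error)
 *			return error;
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size != i_size_read(inode)) {
 *			truncate_setsize(inode, attr->ia_size);
 *			myfs_truncate_blocks(inode, attr->ia_size);
 *		}
 *		setattr_copy(inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */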
  
  /**
   * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
   * @inode: inode
   * @lstart: offset of beginning of hole
   * @lend: offset of last byte of hole
   *
   * This function should typically be called before the filesystem
   * releases resources associated with the freed range (eg. deallocates
   * blocks). This way, pagecache will always stay logically coherent
   * with on-disk format, and the filesystem would not have to deal with
   * situations such as writepage being called for a page that has already
   * had its underlying blocks deallocated.
   */
  void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
  {
  	struct address_space *mapping = inode->i_mapping;
  	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
  	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
  	/*
  	 * This rounding is currently just for example: unmap_mapping_range
  	 * expands its hole outwards, whereas we want it to contract the hole
  	 * inwards.  However, existing callers of truncate_pagecache_range are
  	 * doing their own page rounding first.  Note that unmap_mapping_range
  	 * allows holelen 0 for all, and we allow lend -1 for end of file.
  	 */
  
  	/*
  	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
  	 * once (before truncating pagecache), and without "even_cows" flag:
  	 * hole-punching should not remove private COWed pages from the hole.
  	 */
  	if ((u64)unmap_end > (u64)unmap_start)
  		unmap_mapping_range(mapping, unmap_start,
  				    1 + unmap_end - unmap_start, 0);
  	truncate_inode_pages_range(mapping, lstart, lend);
  }
  EXPORT_SYMBOL(truncate_pagecache_range);
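
/*
 * Illustrative call, not from this file: a hole-punching
 * fallocate(FALLOC_FL_PUNCH_HOLE) path would invalidate the affected
 * cache before freeing the underlying blocks, along the lines of:
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_free_blocks(inode, offset, len);	(hypothetical)
 */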