include/linux/pagemap.h

  #ifndef _LINUX_PAGEMAP_H
  #define _LINUX_PAGEMAP_H
  
  /*
   * Copyright 1995 Linus Torvalds
   */
  #include <linux/mm.h>
  #include <linux/fs.h>
  #include <linux/list.h>
  #include <linux/highmem.h>
  #include <linux/compiler.h>
  #include <asm/uaccess.h>
  #include <linux/gfp.h>
  #include <linux/bitops.h>
  #include <linux/hardirq.h> /* for in_interrupt() */
  #include <linux/hugetlb_inline.h>
  
  /*
   * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
   * allocation mode flags.
   */
  enum mapping_flags {
  	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
  	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
  	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
  	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
  };
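
  /*
   * For illustration, the resulting layout of mapping->flags is roughly
   * (SHIFT == __GFP_BITS_SHIFT):
   *
   *    ... | AS_UNEVICTABLE | AS_MM_ALL_LOCKS | AS_ENOSPC | AS_EIO | gfp mask
   *        |   SHIFT + 3    |    SHIFT + 2    | SHIFT + 1 |  SHIFT | SHIFT-1..0
   *
   * so the AS_* bits sit directly above the gfp bits that
   * mapping_set_gfp_mask() below manipulates.
   */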

  static inline void mapping_set_error(struct address_space *mapping, int error)
  {
  	if (unlikely(error)) {
  		if (error == -ENOSPC)
  			set_bit(AS_ENOSPC, &mapping->flags);
  		else
  			set_bit(AS_EIO, &mapping->flags);
  	}
  }
  static inline void mapping_set_unevictable(struct address_space *mapping)
  {
  	set_bit(AS_UNEVICTABLE, &mapping->flags);
  }
  static inline void mapping_clear_unevictable(struct address_space *mapping)
  {
  	clear_bit(AS_UNEVICTABLE, &mapping->flags);
  }
  static inline int mapping_unevictable(struct address_space *mapping)
  {
  	if (mapping)
  		return test_bit(AS_UNEVICTABLE, &mapping->flags);
  	return !!mapping;
  }

  static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
  {
  	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
  }
  
  /*
   * This is non-atomic.  Only to be used before the mapping is activated.
   * Probably needs a barrier...
   */
  static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
  {
  	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
  				(__force unsigned long)mask;
  }
  
  /*
   * The page cache can be done in larger chunks than
   * one page, because it allows for more efficient
   * throughput (it can then be mapped into user
   * space in smaller chunks for the same flexibility).
   *
   * Or rather, it _will_ be done in larger chunks.
   */
  #define PAGE_CACHE_SHIFT	PAGE_SHIFT
  #define PAGE_CACHE_SIZE		PAGE_SIZE
  #define PAGE_CACHE_MASK		PAGE_MASK
  #define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
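  /*
   * For example, assuming PAGE_CACHE_SIZE == 4096 (so PAGE_CACHE_MASK ==
   * ~4095), PAGE_CACHE_ALIGN(5000) == (5000 + 4095) & ~4095 == 8192:
   * the address is rounded up to the next page boundary.
   */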
  
  #define page_cache_get(page)		get_page(page)
  #define page_cache_release(page)	put_page(page)
  void release_pages(struct page **pages, int nr, int cold);
  /*
   * speculatively take a reference to a page.
   * If the page is free (_count == 0), then _count is untouched, and 0
   * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
   *
   * This function must be called inside the same rcu_read_lock() section as has
   * been used to lookup the page in the pagecache radix-tree (or page table):
   * this allows allocators to use a synchronize_rcu() to stabilize _count.
   *
   * Unless an RCU grace period has passed, the count of all pages coming out
   * of the allocator must be considered unstable. page_count may return higher
   * than expected, and put_page must be able to do the right thing when the
   * page has been finished with, no matter what it is subsequently allocated
   * for (because put_page is what is used here to drop an invalid speculative
   * reference).
   *
   * This is the interesting part of the lockless pagecache (and lockless
   * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
   * has the following pattern:
   * 1. find page in radix tree
   * 2. conditionally increment refcount
   * 3. check the page is still in pagecache (if no, goto 1)
   *
   * Remove-side that cares about stability of _count (eg. reclaim) has the
   * following (with tree_lock held for write):
   * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
   * B. remove page from pagecache
   * C. free the page
   *
   * There are 2 critical interleavings that matter:
   * - 2 runs before A: in this case, A sees elevated refcount and bails out
   * - A runs before 2: in this case, 2 sees zero refcount and retries;
   *   subsequently, B will complete and 1 will find no page, causing the
   *   lookup to return NULL.
   *
   * It is possible that between 1 and 2, the page is removed then the exact same
   * page is inserted into the same position in pagecache. That's OK: the
   * old find_get_page using tree_lock could equally have run before or after
   * such a re-insertion, depending on order that locks are granted.
   *
   * Lookups racing against pagecache insertion isn't a big problem: either 1
   * will find the page or it will not. Likewise, the old find_get_page could run
   * either before the insertion or afterwards, depending on timing.
   */
  static inline int page_cache_get_speculative(struct page *page)
  {
  	VM_BUG_ON(in_interrupt());
  #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
  # ifdef CONFIG_PREEMPT
  	VM_BUG_ON(!in_atomic());
  # endif
  	/*
  	 * Preempt must be disabled here - we rely on rcu_read_lock doing
  	 * this for us.
  	 *
  	 * Pagecache won't be truncated from interrupt context, so if we have
  	 * found a page in the radix tree here, we have pinned its refcount by
  	 * disabling preempt, and hence no need for the "speculative get" that
  	 * SMP requires.
  	 */
  	VM_BUG_ON(page_count(page) == 0);
  	atomic_inc(&page->_count);
  
  #else
  	if (unlikely(!get_page_unless_zero(page))) {
  		/*
  		 * Either the page has been freed, or will be freed.
  		 * In either case, retry here and the caller should
  		 * do the right thing (see comments above).
  		 */
  		return 0;
  	}
  #endif
  	VM_BUG_ON(PageTail(page));
  
  	return 1;
  }
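
  /*
   * For illustration, a lookup following the pattern described above
   * could look roughly like this ("mapping" and "offset" name the
   * pagecache slot being looked up; the real lookups in mm/filemap.c
   * handle further cases):
   *
   *    rcu_read_lock();
   * repeat:
   *    page = radix_tree_lookup(&mapping->page_tree, offset);     (step 1)
   *    if (page) {
   *            if (!page_cache_get_speculative(page))             (step 2)
   *                    goto repeat;
   *            if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
   *                    page_cache_release(page);           (step 3 failed)
   *                    goto repeat;
   *            }
   *    }
   *    rcu_read_unlock();
   */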
  /*
   * Same as above, but add instead of inc (could just be merged)
   */
  static inline int page_cache_add_speculative(struct page *page, int count)
  {
  	VM_BUG_ON(in_interrupt());
  #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
  # ifdef CONFIG_PREEMPT
  	VM_BUG_ON(!in_atomic());
  # endif
  	VM_BUG_ON(page_count(page) == 0);
  	atomic_add(count, &page->_count);
  
  #else
  	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
  		return 0;
  #endif
  	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
  
  	return 1;
  }
  static inline int page_freeze_refs(struct page *page, int count)
  {
  	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
  }
  
  static inline void page_unfreeze_refs(struct page *page, int count)
  {
  	VM_BUG_ON(page_count(page) != 0);
  	VM_BUG_ON(count == 0);
  
  	atomic_set(&page->_count, count);
  }
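
  /*
   * For illustration, the remove side described above (steps A-C) uses
   * these helpers roughly as follows, assuming the pagecache and the
   * caller each hold one reference (hence the expected count of 2):
   *
   *    spin_lock_irq(&mapping->tree_lock);
   *    if (!page_freeze_refs(page, 2)) {
   *            spin_unlock_irq(&mapping->tree_lock);
   *            goto keep;                      (A failed: elevated count)
   *    }
   *    __remove_from_page_cache(page);         (B)
   *    spin_unlock_irq(&mapping->tree_lock);
   *    ... free the page ...                   (C)
   *
   * page_unfreeze_refs() restores the count if the page turns out not to
   * be freeable after A has succeeded.
   */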
  #ifdef CONFIG_NUMA
  extern struct page *__page_cache_alloc(gfp_t gfp);
  #else
  static inline struct page *__page_cache_alloc(gfp_t gfp)
  {
  	return alloc_pages(gfp, 0);
  }
  #endif
  static inline struct page *page_cache_alloc(struct address_space *x)
  {
  	return __page_cache_alloc(mapping_gfp_mask(x));
  }
  
  static inline struct page *page_cache_alloc_cold(struct address_space *x)
  {
  	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
  }
  
  typedef int filler_t(void *, struct page *);
  
  extern struct page * find_get_page(struct address_space *mapping,
  				pgoff_t index);
  extern struct page * find_lock_page(struct address_space *mapping,
  				pgoff_t index);
  extern struct page * find_or_create_page(struct address_space *mapping,
  				pgoff_t index, gfp_t gfp_mask);
  unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
  			unsigned int nr_pages, struct page **pages);
  unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
  			       unsigned int nr_pages, struct page **pages);
  unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
  			int tag, unsigned int nr_pages, struct page **pages);
  struct page *grab_cache_page_write_begin(struct address_space *mapping,
  			pgoff_t index, unsigned flags);

  /*
   * Returns locked page at given index in given cache, creating it if needed.
   */
  static inline struct page *grab_cache_page(struct address_space *mapping,
  								pgoff_t index)
  {
  	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
  }
  
  extern struct page * grab_cache_page_nowait(struct address_space *mapping,
  				pgoff_t index);
  extern struct page * read_cache_page_async(struct address_space *mapping,
  				pgoff_t index, filler_t *filler,
  				void *data);
  extern struct page * read_cache_page(struct address_space *mapping,
  				pgoff_t index, filler_t *filler,
  				void *data);
  extern struct page * read_cache_page_gfp(struct address_space *mapping,
  				pgoff_t index, gfp_t gfp_mask);
  extern int read_cache_pages(struct address_space *mapping,
  		struct list_head *pages, filler_t *filler, void *data);
  static inline struct page *read_mapping_page_async(
  						struct address_space *mapping,
  						     pgoff_t index, void *data)
  {
  	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
  	return read_cache_page_async(mapping, index, filler, data);
  }
  static inline struct page *read_mapping_page(struct address_space *mapping,
  					     pgoff_t index, void *data)
  {
  	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
  	return read_cache_page(mapping, index, filler, data);
  }
  /*
   * Return byte-offset into filesystem object for page.
   */
  static inline loff_t page_offset(struct page *page)
  {
  	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
  }
  extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
  				     unsigned long address);
  static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
  					unsigned long address)
  {
  	pgoff_t pgoff;
  	if (unlikely(is_vm_hugetlb_page(vma)))
  		return linear_hugepage_index(vma, address);
  	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
  	pgoff += vma->vm_pgoff;
  	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  }
  extern void __lock_page(struct page *page);
  extern int __lock_page_killable(struct page *page);
  extern void __lock_page_nosync(struct page *page);
  extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
  				unsigned int flags);
  extern void unlock_page(struct page *page);

  static inline void __set_page_locked(struct page *page)
  {
  	__set_bit(PG_locked, &page->flags);
  }
  static inline void __clear_page_locked(struct page *page)
  {
  	__clear_bit(PG_locked, &page->flags);
  }
  
  static inline int trylock_page(struct page *page)
  {
  	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
  }
  /*
   * lock_page may only be called if we have the page's inode pinned.
   */
  static inline void lock_page(struct page *page)
  {
  	might_sleep();
  	if (!trylock_page(page))
  		__lock_page(page);
  }
  
  /*
   * lock_page_killable is like lock_page but can be interrupted by fatal
   * signals.  It returns 0 if it locked the page and -EINTR if it was
   * killed while waiting.
   */
  static inline int lock_page_killable(struct page *page)
  {
  	might_sleep();
  	if (!trylock_page(page))
  		return __lock_page_killable(page);
  	return 0;
  }
  
  /*
   * lock_page_nosync should only be used if we can't pin the page's inode.
   * Doesn't play quite so well with block device plugging.
   */
  static inline void lock_page_nosync(struct page *page)
  {
  	might_sleep();
  	if (!trylock_page(page))
  		__lock_page_nosync(page);
  }
  	
  /*
   * lock_page_or_retry - Lock the page, unless this would block and the
   * caller indicated that it can handle a retry.
   */
  static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
  				     unsigned int flags)
  {
  	might_sleep();
  	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
  }
  
  /*
   * This is exported only for wait_on_page_locked/wait_on_page_writeback.
   * Never use this directly!
   */
  extern void wait_on_page_bit(struct page *page, int bit_nr);
  
  /* 
   * Wait for a page to be unlocked.
   *
   * This must be called with the caller "holding" the page,
   * i.e. with an increased "page->count" so that the page won't
   * go away during the wait.
   */
  static inline void wait_on_page_locked(struct page *page)
  {
  	if (PageLocked(page))
  		wait_on_page_bit(page, PG_locked);
  }
  
  /* 
   * Wait for a page to complete writeback
   */
  static inline void wait_on_page_writeback(struct page *page)
  {
  	if (PageWriteback(page))
  		wait_on_page_bit(page, PG_writeback);
  }
  
  extern void end_page_writeback(struct page *page);
  
  /*
   * Add an arbitrary waiter to a page's wait queue
   */
  extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
  
  /*
   * Fault a userspace page into pagetables.  Return non-zero on a fault.
   *
   * This assumes that two userspace pages are always sufficient.  That's
   * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
   */
  static inline int fault_in_pages_writeable(char __user *uaddr, int size)
  {
  	int ret;
  	if (unlikely(size == 0))
  		return 0;
  	/*
  	 * Writing zeroes into userspace here is OK, because we know that if
  	 * the zero gets there, we'll be overwriting it.
  	 */
  	ret = __put_user(0, uaddr);
  	if (ret == 0) {
  		char __user *end = uaddr + size - 1;
  
  		/*
  		 * If the page was already mapped, this will get a cache miss
  		 * for sure, so try to avoid doing it.
  		 */
  		if (((unsigned long)uaddr & PAGE_MASK) !=
  				((unsigned long)end & PAGE_MASK))
  		 	ret = __put_user(0, end);
  	}
  	return ret;
  }
  static inline int fault_in_pages_readable(const char __user *uaddr, int size)
  {
  	volatile char c;
  	int ret;
  	if (unlikely(size == 0))
  		return 0;
  	ret = __get_user(c, uaddr);
  	if (ret == 0) {
  		const char __user *end = uaddr + size - 1;
  
  		if (((unsigned long)uaddr & PAGE_MASK) !=
  				((unsigned long)end & PAGE_MASK)) {
  		 	ret = __get_user(c, end);
  			(void)c;
  		}
  	}
  	return ret;
  }
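
  /*
   * For illustration, a buffered write path might prefault the source
   * buffer with these helpers before taking any page lock, so that the
   * later copy cannot fault on a page it would itself need to lock
   * (the names below are only an example):
   *
   *    if (unlikely(fault_in_pages_readable(buf, bytes)))
   *            return -EFAULT;
   *    ... lock the pagecache page and copy "bytes" bytes in ...
   */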
  int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
  				pgoff_t index, gfp_t gfp_mask);
  int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
  				pgoff_t index, gfp_t gfp_mask);
  extern void remove_from_page_cache(struct page *page);
  extern void __remove_from_page_cache(struct page *page);
  
  /*
   * Like add_to_page_cache_locked, but used to add newly allocated pages:
   * the page is new, so we can just run __set_page_locked() against it.
   */
  static inline int add_to_page_cache(struct page *page,
  		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
  {
  	int error;
  	__set_page_locked(page);
  	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
  	if (unlikely(error))
  		__clear_page_locked(page);
  	return error;
  }
  #endif /* _LINUX_PAGEMAP_H */