Blame view

include/linux/pagemap.h 18.7 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  /* SPDX-License-Identifier: GPL-2.0 */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2
3
4
5
6
7
8
9
10
11
12
  #ifndef _LINUX_PAGEMAP_H
  #define _LINUX_PAGEMAP_H
  
  /*
   * Copyright 1995 Linus Torvalds
   */
  #include <linux/mm.h>
  #include <linux/fs.h>
  #include <linux/list.h>
  #include <linux/highmem.h>
  #include <linux/compiler.h>
7c0f6ba68   Linus Torvalds   Replace <asm/uacc...
13
  #include <linux/uaccess.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
14
  #include <linux/gfp.h>
3e9f45bd1   Guillaume Chazarain   Factor outstandin...
15
  #include <linux/bitops.h>
e286781d5   Nick Piggin   mm: speculative p...
16
  #include <linux/hardirq.h> /* for in_interrupt() */
8edf344c6   Naoya Horiguchi   hugetlb: move def...
17
  #include <linux/hugetlb_inline.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
18
19
  
  /*
9c5d760b8   Michal Hocko   mm: split gfp_mas...
20
   * Bits in mapping->flags.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
21
   */
9a896c9a4   Lee Schermerhorn   mm: define a UNIQ...
22
/* Flag bits stored in mapping->flags; manipulated with set_bit()/test_bit(). */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4, 	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
31

8ed1e46aa   Jeff Layton   mm: set both AS_E...
32
33
34
35
36
37
38
39
40
41
42
43
44
45
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	/* Success is the common case; do nothing then. */
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
ba9ddf493   Lee Schermerhorn   Ramfs and Ram Dis...
60
61
62
63
/* Mark all pages of @mapping unevictable (e.g., ramdisk, SHM_LOCK). */
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}
89e004ea5   Lee Schermerhorn   SHM_LOCKED pages ...
64
65
66
67
/* Undo mapping_set_unevictable(): pages of @mapping become evictable again. */
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
ba9ddf493   Lee Schermerhorn   Ramfs and Ram Dis...
68
69
  static inline int mapping_unevictable(struct address_space *mapping)
  {
088e54658   Steven Rostedt   mm: remove likely...
70
  	if (mapping)
89e004ea5   Lee Schermerhorn   SHM_LOCKED pages ...
71
72
  		return test_bit(AS_UNEVICTABLE, &mapping->flags);
  	return !!mapping;
ba9ddf493   Lee Schermerhorn   Ramfs and Ram Dis...
73
  }
ba9ddf493   Lee Schermerhorn   Ramfs and Ram Dis...
74

91b0abe36   Johannes Weiner   mm + fs: store sh...
75
76
77
78
79
80
81
82
83
/* Flag that the final truncate of @mapping is in progress. */
static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}
  
/* Test whether the final truncate of @mapping is in progress. */
static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}
371a096ed   Huang Ying   mm: don't use rad...
84
85
86
87
88
89
90
91
92
/* Declare that @mapping does not maintain writeback-related radix-tree tags. */
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
  
/* Test whether writeback radix-tree tags are maintained for @mapping. */
static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
dd0fc66fb   Al Viro   [PATCH] gfp flags...
93
/* Return the gfp mask used for page cache allocations in @mapping. */
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}
c62d25556   Michal Hocko   mm, fs: introduce...
97
98
99
100
101
102
  /* Restricts the given gfp_mask to what the mapping allows. */
  static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
  		gfp_t gfp_mask)
  {
  	return mapping_gfp_mask(mapping) & gfp_mask;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
103
104
105
106
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	/* Plain store, no locking — see the warning above. */
	m->gfp_mask = mask;
}
b745bc85f   Mel Gorman   mm: page_alloc: c...
111
  void release_pages(struct page **pages, int nr, bool cold);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
112

e286781d5   Nick Piggin   mm: speculative p...
113
114
/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	/* References are only ever taken on head pages. */
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
ce0ad7f09   Nick Piggin   powerpc/mm: Lockl...
189
190
191
192
193
194
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/* UP w/ tree RCU: preemption disabled pins the page, just add. */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	/* Speculative path: fails (returns 0) if the refcount was zero. */
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	/* For compound pages the refcount lives on the head page. */
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}
44110fe38   Paul Jackson   [PATCH] cpuset me...
210
/*
 * Allocate one page for the page cache.  NUMA kernels use an out-of-line
 * implementation; otherwise this is a plain order-0 alloc_pages() call.
 */
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
218
219
/* Allocate a page cache page using @x's gfp mask. */
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}
  
/* As page_cache_alloc(), but requests a cache-cold page (__GFP_COLD). */
static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
8a5c743e3   Michal Hocko   mm, memcg: use co...
227
/*
 * gfp mask for readahead allocations: the mapping's mask plus __GFP_COLD,
 * with __GFP_NORETRY | __GFP_NOWARN since readahead failures are harmless.
 */
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
232
  typedef int filler_t(void *, struct page *);
e7b563bb2   Johannes Weiner   mm: filemap: move...
233
234
235
236
  pgoff_t page_cache_next_hole(struct address_space *mapping,
  			     pgoff_t index, unsigned long max_scan);
  pgoff_t page_cache_prev_hole(struct address_space *mapping,
  			     pgoff_t index, unsigned long max_scan);
2457aec63   Mel Gorman   mm: non-atomicall...
237
238
239
240
241
242
243
244
  #define FGP_ACCESSED		0x00000001
  #define FGP_LOCK		0x00000002
  #define FGP_CREAT		0x00000004
  #define FGP_WRITE		0x00000008
  #define FGP_NOFS		0x00000010
  #define FGP_NOWAIT		0x00000020
  
  struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
45f87de57   Michal Hocko   mm: get rid of ra...
245
  		int fgp_flags, gfp_t cache_gfp_mask);
2457aec63   Mel Gorman   mm: non-atomicall...
246
247
248
249
250
251
252
253
254
255
256
257
258
259
  
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
  
/* As find_get_page(), but with caller-supplied FGP_* lookup flags. */
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
  
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
  
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_flags specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
  
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}
0cd6144aa   Johannes Weiner   mm + fs: prepare ...
335
  struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
0cd6144aa   Johannes Weiner   mm + fs: prepare ...
336
  struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
0cd6144aa   Johannes Weiner   mm + fs: prepare ...
337
338
339
  unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
  			  unsigned int nr_entries, struct page **entries,
  			  pgoff_t *indices);
b947cee4b   Jan Kara   mm: implement fin...
340
341
342
343
344
345
346
347
348
349
  unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
  			pgoff_t end, unsigned int nr_pages,
  			struct page **pages);
  static inline unsigned find_get_pages(struct address_space *mapping,
  			pgoff_t *start, unsigned int nr_pages,
  			struct page **pages)
  {
  	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
  				    pages);
  }
ebf43500e   Jens Axboe   [PATCH] Add find_...
350
351
  unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
  			       unsigned int nr_pages, struct page **pages);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
352
353
  unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
  			int tag, unsigned int nr_pages, struct page **pages);
7e7f77498   Ross Zwisler   mm: add find_get_...
354
355
356
  unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
  			int tag, unsigned int nr_entries,
  			struct page **entries, pgoff_t *indices);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
357

54566b2c1   Nick Piggin   fs: symlink write...
358
359
  struct page *grab_cache_page_write_begin(struct address_space *mapping,
  			pgoff_t index, unsigned flags);
afddba49d   Nick Piggin   fs: introduce wri...
360

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
361
362
363
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
369
  extern struct page * read_cache_page(struct address_space *mapping,
5e5358e7c   Hugh Dickins   mm: cleanup descr...
370
  				pgoff_t index, filler_t *filler, void *data);
0531b2aac   Linus Torvalds   mm: add new 'read...
371
372
  extern struct page * read_cache_page_gfp(struct address_space *mapping,
  				pgoff_t index, gfp_t gfp_mask);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
373
374
  extern int read_cache_pages(struct address_space *mapping,
  		struct list_head *pages, filler_t *filler, void *data);
090d2b185   Pekka Enberg   [PATCH] read_mapp...
375
/*
 * read_mapping_page - read a page into @mapping using its ->readpage.
 * @data is forwarded to the filler as its first argument.
 *
 * NOTE(review): the cast assumes ->readpage's signature is compatible with
 * filler_t (void * vs. its real first-argument type) — confirm against
 * struct address_space_operations.
 */
static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
e286781d5   Nick Piggin   mm: speculative p...
381
  /*
5cbc198ae   Kirill A. Shutemov   mm: fix false-pos...
382
383
   * Get index of the page with in radix-tree
   * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
a0f7a756c   Naoya Horiguchi   mm/rmap.c: fix pg...
384
   */
5cbc198ae   Kirill A. Shutemov   mm: fix false-pos...
385
  static inline pgoff_t page_to_index(struct page *page)
a0f7a756c   Naoya Horiguchi   mm/rmap.c: fix pg...
386
  {
e9b61f198   Kirill A. Shutemov   thp: reintroduce ...
387
  	pgoff_t pgoff;
e9b61f198   Kirill A. Shutemov   thp: reintroduce ...
388
  	if (likely(!PageTransTail(page)))
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
389
  		return page->index;
e9b61f198   Kirill A. Shutemov   thp: reintroduce ...
390
391
392
393
394
  
  	/*
  	 *  We don't initialize ->index for tail pages: calculate based on
  	 *  head page
  	 */
09cbfeaf1   Kirill A. Shutemov   mm, fs: get rid o...
395
  	pgoff = compound_head(page)->index;
e9b61f198   Kirill A. Shutemov   thp: reintroduce ...
396
397
  	pgoff += page - compound_head(page);
  	return pgoff;
a0f7a756c   Naoya Horiguchi   mm/rmap.c: fix pg...
398
399
400
  }
  
/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	/* hugetlb head pages keep ->index in huge-page units; rescale it. */
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}
  
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}
f981c5950   Mel Gorman   mm: methods for t...
419
420
/* As page_offset(), but goes through page_index() to obtain the index. */
static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
0fe6e20b9   Naoya Horiguchi   hugetlb, rmap: ad...
423
424
  extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
  				     unsigned long address);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
425
426
427
/*
 * Page cache index for @address within @vma: the page distance from the
 * start of the VMA plus vma->vm_pgoff.  Hugetlb VMAs are handled by the
 * out-of-line linear_hugepage_index().
 */
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
b3c975286   Harvey Harrison   include/linux: Re...
435
436
  extern void __lock_page(struct page *page);
  extern int __lock_page_killable(struct page *page);
d065bd810   Michel Lespinasse   mm: retry page fa...
437
438
  extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
  				unsigned int flags);
b3c975286   Harvey Harrison   include/linux: Re...
439
  extern void unlock_page(struct page *page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
440

529ae9aaa   Nick Piggin   mm: rename page t...
441
442
/*
 * Try to lock a page without sleeping.  The PG_locked bit lives on the
 * head page of a compound page.  Returns non-zero on success.
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
db37648cd   Nick Piggin   [PATCH] mm: non s...
446
447
448
/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	/* Fast path failed — take the out-of-line slow path. */
	if (!trylock_page(page))
		__lock_page(page);
}
db37648cd   Nick Piggin   [PATCH] mm: non s...
455
456
  
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
  
  /*
d065bd810   Michel Lespinasse   mm: retry page fa...
470
471
   * lock_page_or_retry - Lock the page, unless this would block and the
   * caller indicated that it can handle a retry.
9a95f3cf7   Paul Cassella   mm: describe mmap...
472
473
474
   *
   * Return value and mmap_sem implications depend on flags; see
   * __lock_page_or_retry().
d065bd810   Michel Lespinasse   mm: retry page fa...
475
476
477
478
479
480
481
482
483
   */
  static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
  				     unsigned int flags)
  {
  	might_sleep();
  	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
  }
  
  /*
74d81bfae   Nicholas Piggin   mm: un-export wak...
484
485
   * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
   * and should not be used directly.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
486
   */
b3c975286   Harvey Harrison   include/linux: Re...
487
  extern void wait_on_page_bit(struct page *page, int bit_nr);
f62e00cc3   KOSAKI Motohiro   mm: introduce wai...
488
  extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
a4796e37c   NeilBrown   MM: export page_w...
489

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
490
491
492
493
494
495
496
497
498
499
/* 
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		/* PG_locked lives on the head page of a compound page. */
		wait_on_page_bit(compound_head(page), PG_locked);
}
629060270   Nicholas Piggin   mm: add PageWaite...
502
503
504
505
506
507
/* As wait_on_page_locked(), but interruptible by fatal signals. */
static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
508
509
510
511
512
513
514
515
516
517
/* 
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
  
  extern void end_page_writeback(struct page *page);
1d1d1a767   Darrick J. Wong   mm: only enforce ...
518
  void wait_for_stable_page(struct page *page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
519

c11f0c0b5   Jens Axboe   block/mm: make bd...
520
  void page_endio(struct page *page, bool is_write, int err);
57d998456   Matthew Wilcox   fs/mpage.c: facto...
521

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
522
  /*
385e1ca5f   David Howells   CacheFiles: Permi...
523
524
   * Add an arbitrary waiter to a page's wait queue
   */
ac6424b98   Ingo Molnar   sched/wait: Renam...
525
  extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
385e1ca5f   David Howells   CacheFiles: Permi...
526
527
  
  /*
4bce9f6ee   Al Viro   get rid of separa...
528
   * Fault everything in given userspace address range in.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
529
530
531
   */
/*
 * Fault in [uaddr, uaddr + size) for writing by storing one zero byte per
 * page.  Returns 0 on success or -EFAULT on the first failing page.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* end wrapped past the top of the address space. */
	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
4bce9f6ee   Al Viro   get rid of separa...
556
/*
 * Fault in [uaddr, uaddr + size) for reading by loading one byte per page.
 * Returns 0 on success or -EFAULT on the first failing page.
 */
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	/* volatile so the dummy reads are not optimized away. */
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* end wrapped past the top of the address space. */
	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}
	(void)c;
	return 0;
}
529ae9aaa   Nick Piggin   mm: rename page t...
581
582
583
584
  int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
  				pgoff_t index, gfp_t gfp_mask);
  int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
  				pgoff_t index, gfp_t gfp_mask);
97cecb5a2   Minchan Kim   mm: introduce del...
585
  extern void delete_from_page_cache(struct page *page);
62cccb8c8   Johannes Weiner   mm: simplify lock...
586
  extern void __delete_from_page_cache(struct page *page, void *shadow);
ef6a3c631   Miklos Szeredi   mm: add replace_p...
587
  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
529ae9aaa   Nick Piggin   mm: rename page t...
588
589
590
  
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;
	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	/* On failure, undo the lock so the caller gets the page back clean. */
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
b57c2cb9e   Fabian Frederick   pagemap.h: move d...
603
604
/* Number of PAGE_SIZE pages needed to hold @inode's i_size bytes, rounded up. */
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
608
  #endif /* _LINUX_PAGEMAP_H */