Blame view

include/linux/mm.h 54.5 KB
  #ifndef _LINUX_MM_H
  #define _LINUX_MM_H
  #include <linux/errno.h>
  
  #ifdef __KERNEL__
  #include <linux/gfp.h>
  #include <linux/list.h>
  #include <linux/mmzone.h>
  #include <linux/rbtree.h>
  #include <linux/prio_tree.h>
  #include <linux/debug_locks.h>
  #include <linux/mm_types.h>
  #include <linux/range.h>
  #include <linux/pfn.h>
  #include <linux/bit_spinlock.h>
  
  struct mempolicy;
  struct anon_vma;
  struct file_ra_state;
  struct user_struct;
  struct writeback_control;
  
  #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
  extern unsigned long max_mapnr;
  #endif
  
  extern unsigned long num_physpages;
  extern unsigned long totalram_pages;
  extern void * high_memory;
  extern int page_cluster;
  
  #ifdef CONFIG_SYSCTL
  extern int sysctl_legacy_va_layout;
  #else
  #define sysctl_legacy_va_layout 0
  #endif
  
  #include <asm/page.h>
  #include <asm/pgtable.h>
  #include <asm/processor.h>

  #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
  /* to align the pointer to the (next) page boundary */
  #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
  /*
   * Linux kernel virtual memory manager primitives.
   * The idea being to have a "virtual" mm in the same way
   * we have a virtual fs - giving a cleaner interface to the
   * mm details, and allowing different kinds of memory mappings
   * (from shared memory to executable loading to arbitrary
   * mmap() functions).
   */
  extern struct kmem_cache *vm_area_cachep;
  #ifndef CONFIG_MMU
  extern struct rb_root nommu_region_tree;
  extern struct rw_semaphore nommu_region_sem;
  
  extern unsigned int kobjsize(const void *objp);
  #endif
  
  /*
   * vm_flags in vm_area_struct, see mm_types.h.
   */
  #define VM_READ		0x00000001	/* currently active flags */
  #define VM_WRITE	0x00000002
  #define VM_EXEC		0x00000004
  #define VM_SHARED	0x00000008
  /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
  #define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
  #define VM_MAYWRITE	0x00000020
  #define VM_MAYEXEC	0x00000040
  #define VM_MAYSHARE	0x00000080
  
  #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
  #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
  #define VM_GROWSUP	0x00000200
  #else
  #define VM_GROWSUP	0x00000000
  #define VM_NOHUGEPAGE	0x00000200	/* MADV_NOHUGEPAGE marked this vma */
  #endif
  #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
  #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
  
  #define VM_EXECUTABLE	0x00001000
  #define VM_LOCKED	0x00002000
  #define VM_IO           0x00004000	/* Memory mapped I/O or similar */
  
  					/* Used by sys_madvise() */
  #define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
  #define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
  
  #define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
  #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
  #define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
  #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
  #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
  #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
  #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
  #ifndef CONFIG_TRANSPARENT_HUGEPAGE
  #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
  #else
  #define VM_HUGEPAGE	0x01000000	/* MADV_HUGEPAGE marked this vma */
  #endif
  #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
  #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */

  #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
  #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
  #define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
  #define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
  #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

  /* Bits set in the VMA until the stack is in its final location */
  #define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
  #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
  #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
  #endif
  
  #ifdef CONFIG_STACK_GROWSUP
  #define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
  #else
  #define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
  #endif
  
  #define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
  #define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
  #define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
  #define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
  #define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
  
  /*
   * special vmas that are non-mergable, non-mlock()able
   */
  #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
  
  /*
   * mapping from the currently active vm_flags protection bits (the
   * low four bits) to a page protection mask..
   */
  extern pgprot_t protection_map[16];
  #define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
  #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
  #define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
  #define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */

  /*
   * This interface is used by x86 PAT code to identify a pfn mapping that is
   * linear over the entire vma. This is to optimize PAT code that deals with
   * marking the physical region with a particular prot. This is not for generic
   * mm use. Note also that this check will not work if the pfn mapping is
   * linear for a vma starting at physical address 0, in which case PAT code
   * falls back to the slow path of reserving the physical range page by page.
   */
  static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
  {
  	return (vma->vm_flags & VM_PFN_AT_MMAP);
  }
  
  static inline int is_pfn_mapping(struct vm_area_struct *vma)
  {
  	return (vma->vm_flags & VM_PFNMAP);
  }

  /*
   * vm_fault is filled by the pagefault handler and passed to the vma's
   * ->fault function. The vma's ->fault is responsible for returning a bitmask
   * of VM_FAULT_xxx flags that give details about how the fault was handled.
   *
   * pgoff should be used in favour of virtual_address, if possible. If pgoff
   * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
   * mapping support.
   */
  struct vm_fault {
  	unsigned int flags;		/* FAULT_FLAG_xxx flags */
  	pgoff_t pgoff;			/* Logical page offset based on vma */
  	void __user *virtual_address;	/* Faulting virtual address */
  
  	struct page *page;		/* ->fault handlers should return a
  					 * page here, unless VM_FAULT_NOPAGE
  					 * is set (which is also implied by
  					 * VM_FAULT_ERROR).
  					 */
  };
  
  /*
   * These are the virtual MM functions - opening of an area, closing and
   * unmapping it (needed to keep files on disk up-to-date etc), pointer
   * to the functions called when a no-page or a wp-page exception occurs. 
   */
  struct vm_operations_struct {
  	void (*open)(struct vm_area_struct * area);
  	void (*close)(struct vm_area_struct * area);
  	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
  
  	/* notification that a previously read-only page is about to become
  	 * writable, if an error is returned it will cause a SIGBUS */
  	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
  
  	/* called by access_process_vm when get_user_pages() fails, typically
  	 * for use by special VMAs that can switch between memory and hardware
  	 */
  	int (*access)(struct vm_area_struct *vma, unsigned long addr,
  		      void *buf, int len, int write);
  #ifdef CONFIG_NUMA
  	/*
  	 * set_policy() op must add a reference to any non-NULL @new mempolicy
  	 * to hold the policy upon return.  Caller should pass NULL @new to
  	 * remove a policy and fall back to surrounding context--i.e. do not
  	 * install a MPOL_DEFAULT policy, nor the task or system default
  	 * mempolicy.
  	 */
  	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
  
  	/*
  	 * get_policy() op must add reference [mpol_get()] to any policy at
  	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
  	 * in mm/mempolicy.c will do this automatically.
  	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
  	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
  	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
  	 * must return NULL--i.e., do not "fallback" to task or system default
  	 * policy.
  	 */
  	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
  					unsigned long addr);
  	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
  		const nodemask_t *to, unsigned long flags);
  #endif
  };
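
  /*
   * Illustrative sketch (editor's example, not part of this header): a
   * minimal driver-style ->fault handler wired into vm_operations_struct.
   * The names "example_fault", "example_vm_ops" and "example_page" are
   * hypothetical; the handler hands back a pre-allocated page and lets
   * the core MM install the PTE.
   *
   *	static int example_fault(struct vm_area_struct *vma,
   *				 struct vm_fault *vmf)
   *	{
   *		struct page *page = example_page;	// driver-owned page
   *
   *		if (!page)
   *			return VM_FAULT_SIGBUS;	// error: deliver SIGBUS
   *		get_page(page);		// core MM drops this reference later
   *		vmf->page = page;	// returned via struct vm_fault
   *		return 0;		// 0: page returned, no special flags
   *	}
   *
   *	static const struct vm_operations_struct example_vm_ops = {
   *		.fault = example_fault,
   *	};
   */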
  
  struct mmu_gather;
  struct inode;
  #define page_private(page)		((page)->private)
  #define set_page_private(page, v)	((page)->private = (v))

  /*
   * FIXME: take this include out, include page-flags.h in
   * files which need it (119 of them)
   */
  #include <linux/page-flags.h>
  #include <linux/huge_mm.h>
  
  /*
   * Methods to modify the page usage count.
   *
   * What counts for a page usage:
   * - cache mapping   (page->mapping)
   * - private data    (page->private)
   * - page mapped in a task's page tables, each mapping
   *   is counted separately
   *
   * Also, many kernel routines increase the page count before a critical
   * routine so they can be sure the page doesn't go away from under them.
   */
  
  /*
   * Drop a ref, return true if the refcount fell to zero (the page has no users)
   */
  static inline int put_page_testzero(struct page *page)
  {
  	VM_BUG_ON(atomic_read(&page->_count) == 0);
  	return atomic_dec_and_test(&page->_count);
  }
  
  /*
   * Try to grab a ref unless the page has a refcount of zero, return false if
   * that is the case.
   */
  static inline int get_page_unless_zero(struct page *page)
  {
  	return atomic_inc_not_zero(&page->_count);
  }
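
  /*
   * Illustrative sketch (editor's example, not from this header): the
   * speculative-reference pattern these two helpers support.  A lockless
   * lookup may race with the page being freed, so the reference is taken
   * with get_page_unless_zero() and the result revalidated afterwards.
   * "lookup_slot" is a hypothetical lockless lookup helper.
   *
   *	struct page *page = lookup_slot(mapping, index);   // lockless
   *
   *	if (page && !get_page_unless_zero(page))
   *		page = NULL;	// lost the race: refcount already hit zero
   *	if (page) {
   *		// ... use the page ...
   *		put_page(page);	// put_page() uses put_page_testzero()
   *	}
   */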

  extern int page_is_ram(unsigned long pfn);
  /* Support for virtually mapped pages */
  struct page *vmalloc_to_page(const void *addr);
  unsigned long vmalloc_to_pfn(const void *addr);

  /*
   * Determine if an address is within the vmalloc range
   *
   * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
   * is no special casing required.
   */
  static inline int is_vmalloc_addr(const void *x)
  {
  #ifdef CONFIG_MMU
  	unsigned long addr = (unsigned long)x;
  
  	return addr >= VMALLOC_START && addr < VMALLOC_END;
  #else
  	return 0;
  #endif
  }
  #ifdef CONFIG_MMU
  extern int is_vmalloc_or_module_addr(const void *x);
  #else
  static inline int is_vmalloc_or_module_addr(const void *x)
  {
  	return 0;
  }
  #endif

  static inline void compound_lock(struct page *page)
  {
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  	bit_spin_lock(PG_compound_lock, &page->flags);
  #endif
  }
  
  static inline void compound_unlock(struct page *page)
  {
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  	bit_spin_unlock(PG_compound_lock, &page->flags);
  #endif
  }
  
  static inline unsigned long compound_lock_irqsave(struct page *page)
  {
  	unsigned long uninitialized_var(flags);
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  	local_irq_save(flags);
  	compound_lock(page);
  #endif
  	return flags;
  }
  
  static inline void compound_unlock_irqrestore(struct page *page,
  					      unsigned long flags)
  {
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  	compound_unlock(page);
  	local_irq_restore(flags);
  #endif
  }
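
  /*
   * Illustrative sketch (editor's example, not a real call site): the
   * compound lock is typically taken on the head page, with a reference
   * already held on it, to keep a transparent huge page from having its
   * head/tail structure split while it is examined:
   *
   *	unsigned long flags;
   *	struct page *head = compound_head(page);	// defined below
   *
   *	flags = compound_lock_irqsave(head);	// no-op without THP
   *	// ... head/tail relationship is stable here ...
   *	compound_unlock_irqrestore(head, flags);
   */
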
  static inline struct page *compound_head(struct page *page)
  {
  	if (unlikely(PageTail(page)))
  		return page->first_page;
  	return page;
  }
  static inline int page_count(struct page *page)
  {
  	return atomic_read(&compound_head(page)->_count);
  }
  
  static inline void get_page(struct page *page)
  {
  	/*
  	 * Getting a normal page or the head of a compound page
  	 * requires to already have an elevated page->_count. Only if
  	 * we're getting a tail page, the elevated page->_count is
  	 * required only in the head page, so for tail pages the
  	 * bugcheck only verifies that the page->_count isn't
  	 * negative.
  	 */
  	VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
  	atomic_inc(&page->_count);
  	/*
  	 * Getting a tail page will elevate both the head and tail
  	 * page->_count(s).
  	 */
  	if (unlikely(PageTail(page))) {
  		/*
  		 * This is safe only because
  		 * __split_huge_page_refcount can't run under
  		 * get_page().
  		 */
  		VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
  		atomic_inc(&page->first_page->_count);
  	}
  }
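
  /*
   * Illustrative sketch (editor's example): the usual pin/use/unpin
   * pattern built on get_page()/put_page().  The caller must already
   * hold a valid reference (for instance obtained from a page-table
   * walk under the PTE lock) before calling get_page().
   *
   *	get_page(page);		// pin across a sleeping operation
   *	// ... e.g. copy data to or from the page ...
   *	put_page(page);		// drop the pin; may free the page
   */
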
  static inline struct page *virt_to_head_page(const void *x)
  {
  	struct page *page = virt_to_page(x);
  	return compound_head(page);
  }
  /*
   * Setup the page count before being freed into the page allocator for
   * the first time (boot or memory hotplug)
   */
  static inline void init_page_count(struct page *page)
  {
  	atomic_set(&page->_count, 1);
  }
  /*
   * PageBuddy() indicate that the page is free and in the buddy system
   * (see mm/page_alloc.c).
   */
  static inline int PageBuddy(struct page *page)
  {
  	return atomic_read(&page->_mapcount) == -2;
  }
  
  static inline void __SetPageBuddy(struct page *page)
  {
  	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
  	atomic_set(&page->_mapcount, -2);
  }
  
  static inline void __ClearPageBuddy(struct page *page)
  {
  	VM_BUG_ON(!PageBuddy(page));
  	atomic_set(&page->_mapcount, -1);
  }
  void put_page(struct page *page);
  void put_pages_list(struct list_head *pages);

  void split_page(struct page *page, unsigned int order);
  int split_free_page(struct page *page);

  /*
   * Compound pages have a destructor function.  Provide a
   * prototype for that function and accessor functions.
   * These are _only_ valid on the head of a PG_compound page.
   */
  typedef void compound_page_dtor(struct page *);
  
  static inline void set_compound_page_dtor(struct page *page,
  						compound_page_dtor *dtor)
  {
  	page[1].lru.next = (void *)dtor;
  }
  
  static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
  {
  	return (compound_page_dtor *)page[1].lru.next;
  }
  static inline int compound_order(struct page *page)
  {
  	if (!PageHead(page))
  		return 0;
  	return (unsigned long)page[1].lru.prev;
  }
  static inline int compound_trans_order(struct page *page)
  {
  	int order;
  	unsigned long flags;
  
  	if (!PageHead(page))
  		return 0;
  
  	flags = compound_lock_irqsave(page);
  	order = compound_order(page);
  	compound_unlock_irqrestore(page, flags);
  	return order;
  }
  static inline void set_compound_order(struct page *page, unsigned long order)
  {
  	page[1].lru.prev = (void *)order;
  }
  #ifdef CONFIG_MMU
  /*
   * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
   * servicing faults for write access.  In the normal case, we do always want
   * pte_mkwrite.  But get_user_pages can cause write faults for mappings
   * that do not have writing enabled, when used by access_process_vm.
   */
  static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
  {
  	if (likely(vma->vm_flags & VM_WRITE))
  		pte = pte_mkwrite(pte);
  	return pte;
  }
  #endif
  
  /*
   * Multiple processes may "see" the same page. E.g. for untouched
   * mappings of /dev/null, all processes see the same page full of
   * zeroes, and text pages of executables and shared libraries have
   * only one copy in memory, at most, normally.
   *
   * For the non-reserved pages, page_count(page) denotes a reference count.
   *   page_count() == 0 means the page is free. page->lru is then used for
   *   freelist management in the buddy allocator.
   *   page_count() > 0  means the page has been allocated.
   *
   * Pages are allocated by the slab allocator in order to provide memory
   * to kmalloc and kmem_cache_alloc. In this case, the management of the
   * page, and the fields in 'struct page' are the responsibility of mm/slab.c
   * unless a particular usage is carefully commented. (the responsibility of
   * freeing the kmalloc memory is the caller's, of course).
   *
   * A page may be used by anyone else who does a __get_free_page().
   * In this case, page_count still tracks the references, and should only
   * be used through the normal accessor functions. The top bits of page->flags
   * and page->virtual store page management information, but all other fields
   * are unused and could be used privately, carefully. The management of this
   * page is the responsibility of the one who allocated it, and those who have
   * subsequently been given references to it.
   *
   * The other pages (we may call them "pagecache pages") are completely
   * managed by the Linux memory manager: I/O, buffers, swapping etc.
   * The following discussion applies only to them.
   *
   * A pagecache page contains an opaque `private' member, which belongs to the
   * page's address_space. Usually, this is the address of a circular list of
   * the page's disk buffers. PG_private must be set to tell the VM to call
   * into the filesystem to release these pages.
   *
   * A page may belong to an inode's memory mapping. In this case, page->mapping
   * is the pointer to the inode, and page->index is the file offset of the page,
   * in units of PAGE_CACHE_SIZE.
   *
   * If pagecache pages are not associated with an inode, they are said to be
   * anonymous pages. These may become associated with the swapcache, and in that
   * case PG_swapcache is set, and page->private is an offset into the swapcache.
   *
   * In either case (swapcache or inode backed), the pagecache itself holds one
   * reference to the page. Setting PG_private should also increment the
   * refcount. Each user mapping also has a reference to the page.
   *
   * The pagecache pages are stored in a per-mapping radix tree, which is
   * rooted at mapping->page_tree, and indexed by offset.
   * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
   * lists, we instead now tag pages as dirty/writeback in the radix tree.
   *
   * All pagecache pages may be subject to I/O:
   * - inode pages may need to be read from disk,
   * - inode pages which have been modified and are MAP_SHARED may need
   *   to be written back to the inode on disk,
   * - anonymous pages (including MAP_PRIVATE file mappings) which have been
   *   modified may need to be swapped out to swap space and (later) to be read
   *   back into memory.
   */
  
  /*
   * The zone field is never updated after free_area_init_core()
   * sets it, so none of the operations on it need to be atomic.
   */

  
  /*
   * page->flags layout:
   *
   * There are three possibilities for how page->flags get
   * laid out.  The first is for the normal case, without
   * sparsemem.  The second is for sparsemem when there is
   * plenty of space for node and section.  The last is when
   * we have run out of space and have to fall back to an
   * alternate (slower) way of determining the node.
   *
   * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
   * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
   * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
   */
  #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
  #define SECTIONS_WIDTH		SECTIONS_SHIFT
  #else
  #define SECTIONS_WIDTH		0
  #endif
  
  #define ZONES_WIDTH		ZONES_SHIFT
  #if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
  #define NODES_WIDTH		NODES_SHIFT
  #else
  #ifdef CONFIG_SPARSEMEM_VMEMMAP
  #error "Vmemmap: No space for nodes field in page flags"
  #endif
  #define NODES_WIDTH		0
  #endif
  
  /* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
  #define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
  #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
  #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
  
  /*
   * We are going to use the flags for the page to node mapping if it's in
   * there.  This includes the case where there is no node, so it is implicit.
   */
  #if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
  #define NODE_NOT_IN_PAGE_FLAGS
  #endif
  
  #ifndef PFN_SECTION_SHIFT
  #define PFN_SECTION_SHIFT 0
  #endif
  
  /*
   * Define the bit shifts to access each section.  For non-existent
   * sections we define the shift as 0; that plus a 0 mask ensures
   * the compiler will optimise away reference to them.
   */
  #define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
  #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
  #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
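
  /*
   * Worked example (editor's illustration, assuming a 64-bit build with
   * no sparsemem, NODES_SHIFT == 6 and ZONES_SHIFT == 2, so that
   * SECTIONS_WIDTH is 0 and NODES_WIDTH is 6):
   *
   *	SECTIONS_PGOFF = 64          (empty field at the very top)
   *	NODES_PGOFF    = 64 - 6 = 58 (node id stored in bits 58..63)
   *	ZONES_PGOFF    = 58 - 2 = 56 (zone stored in bits 56..57)
   *
   * leaving the low NR_PAGEFLAGS bits for the page flag bits, which gives
   * the "| NODE | ZONE | ... | FLAGS |" layout described above.
   */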

  /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
  #ifdef NODE_NOT_IN_PAGE_FLAGS
  #define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
  #define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
  						SECTIONS_PGOFF : ZONES_PGOFF)
  #else
  #define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
  #define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
  						NODES_PGOFF : ZONES_PGOFF)
  #endif
  #define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

  #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
  #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
  #endif
  #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
  #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
  #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
  #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

  static inline enum zone_type page_zonenum(struct page *page)
  {
  	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
  }

  /*
   * The identification function is only used by the buddy allocator for
   * determining if two pages could be buddies. We are not really
   * identifying a zone since we could be using the section number
   * id if we do not have a node id available in page flags.
   * We guarantee only that it will return the same value for two
   * combinable pages in a zone.
   */
  static inline int page_zone_id(struct page *page)
  {
  	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
  }
  static inline int zone_to_nid(struct zone *zone)
  {
  #ifdef CONFIG_NUMA
  	return zone->node;
  #else
  	return 0;
  #endif
  }
  #ifdef NODE_NOT_IN_PAGE_FLAGS
  extern int page_to_nid(struct page *page);
  #else
  static inline int page_to_nid(struct page *page)
  {
  	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
  }
  #endif
  
  static inline struct zone *page_zone(struct page *page)
  {
  	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
  }
  #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
  static inline unsigned long page_to_section(struct page *page)
  {
  	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
  }
  #endif

  static inline void set_page_zone(struct page *page, enum zone_type zone)
  {
  	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
  	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
  }

  static inline void set_page_node(struct page *page, unsigned long node)
  {
  	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
  	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
  }

  static inline void set_page_section(struct page *page, unsigned long section)
  {
  	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
  	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
  }

  static inline void set_page_links(struct page *page, enum zone_type zone,
  	unsigned long node, unsigned long pfn)
  {
  	set_page_zone(page, zone);
  	set_page_node(page, node);
  	set_page_section(page, pfn_to_section_nr(pfn));
  }
  /*
   * Some inline functions in vmstat.h depend on page_zone()
   */
  #include <linux/vmstat.h>
  static __always_inline void *lowmem_page_address(struct page *page)
  {
  	return __va(PFN_PHYS(page_to_pfn(page)));
  }
  
  #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
  #define HASHED_PAGE_VIRTUAL
  #endif
  
  #if defined(WANT_PAGE_VIRTUAL)
  #define page_address(page) ((page)->virtual)
  #define set_page_address(page, address)			\
  	do {						\
  		(page)->virtual = (address);		\
  	} while(0)
  #define page_address_init()  do { } while(0)
  #endif
  
  #if defined(HASHED_PAGE_VIRTUAL)
  void *page_address(struct page *page);
  void set_page_address(struct page *page, void *virtual);
  void page_address_init(void);
  #endif
  
  #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
  #define page_address(page) lowmem_page_address(page)
  #define set_page_address(page, address)  do { } while(0)
  #define page_address_init()  do { } while(0)
  #endif
  
  /*
   * On an anonymous page mapped into a user virtual memory area,
   * page->mapping points to its anon_vma, not to a struct address_space;
   * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
   *
   * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
   * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
   * and then page->mapping points, not to an anon_vma, but to a private
   * structure which KSM associates with that merged page.  See ksm.h.
   *
   * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
   *
   * Please note that, confusingly, "page_mapping" refers to the inode
   * address_space which maps the page from disk; whereas "page_mapped"
   * refers to user virtual address space into which the page is mapped.
   */
  #define PAGE_MAPPING_ANON	1
  #define PAGE_MAPPING_KSM	2
  #define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
  
  extern struct address_space swapper_space;
  static inline struct address_space *page_mapping(struct page *page)
  {
  	struct address_space *mapping = page->mapping;
  	VM_BUG_ON(PageSlab(page));
  	if (unlikely(PageSwapCache(page)))
  		mapping = &swapper_space;
  	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
  		mapping = NULL;
  	return mapping;
  }
  /* Neutral page->mapping pointer to address_space or anon_vma or other */
  static inline void *page_rmapping(struct page *page)
  {
  	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
  }
  static inline int PageAnon(struct page *page)
  {
  	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
  }
  
  /*
   * Return the pagecache index of the passed page.  Regular pagecache pages
   * use ->index whereas swapcache pages use ->private
   */
  static inline pgoff_t page_index(struct page *page)
  {
  	if (unlikely(PageSwapCache(page)))
  		return page_private(page);
  	return page->index;
  }
  
  /*
   * The atomic page->_mapcount, like _count, starts from -1:
   * so that transitions both from it and to it can be tracked,
   * using atomic_inc_and_test and atomic_add_negative(-1).
   */
  static inline void reset_page_mapcount(struct page *page)
  {
  	atomic_set(&(page)->_mapcount, -1);
  }
  
  static inline int page_mapcount(struct page *page)
  {
  	return atomic_read(&(page)->_mapcount) + 1;
  }
  
  /*
   * Return true if this page is mapped into pagetables.
   */
  static inline int page_mapped(struct page *page)
  {
  	return atomic_read(&(page)->_mapcount) >= 0;
  }
  
  /*
   * Different kinds of faults, as returned by handle_mm_fault().
   * Used to decide whether a process gets delivered SIGBUS or
   * just gets major/minor fault counters bumped up.
   */

  #define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

  #define VM_FAULT_OOM	0x0001
  #define VM_FAULT_SIGBUS	0x0002
  #define VM_FAULT_MAJOR	0x0004
  #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
  #define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
  #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */

  #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
  #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
  #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */

  #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
  
  #define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
  			 VM_FAULT_HWPOISON_LARGE)
  
  /* Encode hstate index for a hwpoisoned large page */
  #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
  #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
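
  /*
   * Editor's illustration of the hstate-index encoding above: for a
   * hwpoisoned huge page whose hstate index is 3,
   *
   *	ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(3);
   *		// sets 0x0020 plus (3 << 12) == 0x3000
   *	idx = VM_FAULT_GET_HINDEX(ret);		// yields 3 again
   *
   * i.e. the index round-trips through the 0xf000 mask.
   */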

  /*
   * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
   */
  extern void pagefault_out_of_memory(void);
  #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
  
  extern void show_free_areas(void);
  int shmem_lock(struct file *file, int lock, struct user_struct *user);
  struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
  int shmem_zero_setup(struct vm_area_struct *);
  #ifndef CONFIG_MMU
  extern unsigned long shmem_get_unmapped_area(struct file *file,
  					     unsigned long addr,
  					     unsigned long len,
  					     unsigned long pgoff,
  					     unsigned long flags);
  #endif
  extern int can_do_mlock(void);
  extern int user_shm_lock(size_t, struct user_struct *);
  extern void user_shm_unlock(size_t, struct user_struct *);
  
  /*
   * Parameter block passed down to zap_pte_range in exceptional cases.
   */
  struct zap_details {
  	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
  	struct address_space *check_mapping;	/* Check page->mapping if set */
  	pgoff_t	first_index;			/* Lowest page->index to unmap */
  	pgoff_t last_index;			/* Highest page->index to unmap */
  	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
  	unsigned long truncate_count;		/* Compare vm_truncate_count */
  };
  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
  		pte_t pte);
  int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
  		unsigned long size);
  unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
  		unsigned long size, struct zap_details *);
  unsigned long unmap_vmas(struct mmu_gather **tlb,
  		struct vm_area_struct *start_vma, unsigned long start_addr,
  		unsigned long end_addr, unsigned long *nr_accounted,
  		struct zap_details *);
  
  /**
   * mm_walk - callbacks for walk_page_range
   * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
   * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
   * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
   * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
   * @pte_hole: if set, called for each hole at all levels
   * @hugetlb_entry: if set, called for each hugetlb entry
   *
   * (see walk_page_range for more details)
   */
  struct mm_walk {
  	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
  	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
  	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
  	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
  	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
  	int (*hugetlb_entry)(pte_t *, unsigned long,
  			     unsigned long, unsigned long, struct mm_walk *);
  	struct mm_struct *mm;
  	void *private;
  };
  int walk_page_range(unsigned long addr, unsigned long end,
  		struct mm_walk *walk);
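
  /*
   * Illustrative sketch (editor's example, not part of this header): a
   * minimal walk_page_range() user that counts present PTEs in a range.
   * The names "count_pte" and "count_present_ptes" are hypothetical.
   *
   *	static int count_pte(pte_t *pte, unsigned long addr,
   *			     unsigned long next, struct mm_walk *walk)
   *	{
   *		unsigned long *count = walk->private;
   *
   *		if (pte_present(*pte))
   *			(*count)++;
   *		return 0;		// non-zero would abort the walk
   *	}
   *
   *	static unsigned long count_present_ptes(struct mm_struct *mm,
   *				unsigned long start, unsigned long end)
   *	{
   *		unsigned long count = 0;
   *		struct mm_walk walk = {
   *			.pte_entry = count_pte,
   *			.mm = mm,
   *			.private = &count,
   *		};
   *
   *		down_read(&mm->mmap_sem);	// walker expects mmap_sem held
   *		walk_page_range(start, end, &walk);
   *		up_read(&mm->mmap_sem);
   *		return count;
   *	}
   */
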
  void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
  		unsigned long end, unsigned long floor, unsigned long ceiling);
  int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
  			struct vm_area_struct *vma);
  void unmap_mapping_range(struct address_space *mapping,
  		loff_t const holebegin, loff_t const holelen, int even_cows);
  int follow_pfn(struct vm_area_struct *vma, unsigned long address,
  	unsigned long *pfn);
  int follow_phys(struct vm_area_struct *vma, unsigned long address,
  		unsigned int flags, unsigned long *prot, resource_size_t *phys);
  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
  			void *buf, int len, int write);
  
  static inline void unmap_shared_mapping_range(struct address_space *mapping,
  		loff_t const holebegin, loff_t const holelen)
  {
  	unmap_mapping_range(mapping, holebegin, holelen, 0);
  }
  extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
  extern void truncate_setsize(struct inode *inode, loff_t newsize);
  extern int vmtruncate(struct inode *inode, loff_t offset);
  extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);

  int truncate_inode_page(struct address_space *mapping, struct page *page);
  int generic_error_remove_page(struct address_space *mapping, struct page *page);

  int invalidate_inode_page(struct page *page);
  #ifdef CONFIG_MMU
  extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  			unsigned long address, unsigned int flags);
  #else
  static inline int handle_mm_fault(struct mm_struct *mm,
  			struct vm_area_struct *vma, unsigned long address,
  			unsigned int flags)
  {
  	/* should never happen if there's no MMU */
  	BUG();
  	return VM_FAULT_SIGBUS;
  }
  #endif

  extern int make_pages_present(unsigned long addr, unsigned long end);
  extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);

  int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  			unsigned long start, int nr_pages, int write, int force,
  			struct page **pages, struct vm_area_struct **vmas);
  int get_user_pages_fast(unsigned long start, int nr_pages, int write,
  			struct page **pages);
  struct page *get_dump_page(unsigned long addr);

  extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
  extern void do_invalidatepage(struct page *page, unsigned long offset);
  int __set_page_dirty_nobuffers(struct page *page);
  int __set_page_dirty_no_writeback(struct page *page);
  int redirty_page_for_writepage(struct writeback_control *wbc,
  				struct page *page);
  void account_page_dirtied(struct page *page, struct address_space *mapping);
  void account_page_writeback(struct page *page);
  int set_page_dirty(struct page *page);
  int set_page_dirty_lock(struct page *page);
  int clear_page_dirty_for_io(struct page *page);
  /* Is the vma a continuation of the stack vma above it? */
  static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
  {
  	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
  }
  extern unsigned long move_page_tables(struct vm_area_struct *vma,
  		unsigned long old_addr, struct vm_area_struct *new_vma,
  		unsigned long new_addr, unsigned long len);
  extern unsigned long do_mremap(unsigned long addr,
  			       unsigned long old_len, unsigned long new_len,
  			       unsigned long flags, unsigned long new_addr);
  extern int mprotect_fixup(struct vm_area_struct *vma,
  			  struct vm_area_struct **pprev, unsigned long start,
  			  unsigned long end, unsigned long newflags);

  /*
   * doesn't attempt to fault and will return short.
   */
  int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  			  struct page **pages);
  /*
   * per-process(per-mm_struct) statistics.
   */
  #if defined(SPLIT_RSS_COUNTING)
  /*
   * The mm counters are not protected by its page_table_lock,
   * so must be incremented atomically.
   */
  static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
  {
  	atomic_long_set(&mm->rss_stat.count[member], value);
  }
  unsigned long get_mm_counter(struct mm_struct *mm, int member);
  
  static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
  {
  	atomic_long_add(value, &mm->rss_stat.count[member]);
  }
  
  static inline void inc_mm_counter(struct mm_struct *mm, int member)
  {
  	atomic_long_inc(&mm->rss_stat.count[member]);
  }
  
  static inline void dec_mm_counter(struct mm_struct *mm, int member)
  {
  	atomic_long_dec(&mm->rss_stat.count[member]);
  }
  
#else  /* !SPLIT_RSS_COUNTING */
  /*
   * The mm counters are protected by its page_table_lock,
   * so can be incremented directly.
   */
  static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
  {
  	mm->rss_stat.count[member] = value;
  }
  
  static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
  {
  	return mm->rss_stat.count[member];
  }
  
  static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
  {
  	mm->rss_stat.count[member] += value;
  }
  
  static inline void inc_mm_counter(struct mm_struct *mm, int member)
  {
  	mm->rss_stat.count[member]++;
  }
  
  static inline void dec_mm_counter(struct mm_struct *mm, int member)
  {
  	mm->rss_stat.count[member]--;
  }
  
#endif /* SPLIT_RSS_COUNTING */
  
  static inline unsigned long get_mm_rss(struct mm_struct *mm)
  {
  	return get_mm_counter(mm, MM_FILEPAGES) +
  		get_mm_counter(mm, MM_ANONPAGES);
  }
  
  static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
  {
  	return max(mm->hiwater_rss, get_mm_rss(mm));
  }
  
  static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
  {
  	return max(mm->hiwater_vm, mm->total_vm);
  }
  
  static inline void update_hiwater_rss(struct mm_struct *mm)
  {
  	unsigned long _rss = get_mm_rss(mm);
  
  	if ((mm)->hiwater_rss < _rss)
  		(mm)->hiwater_rss = _rss;
  }
  
  static inline void update_hiwater_vm(struct mm_struct *mm)
  {
  	if (mm->hiwater_vm < mm->total_vm)
  		mm->hiwater_vm = mm->total_vm;
  }
  
  static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
  					 struct mm_struct *mm)
  {
  	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
  
  	if (*maxrss < hiwater_rss)
  		*maxrss = hiwater_rss;
  }
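/*
 * Hypothetical usage sketch (not part of the original header): reporting a
 * task's resident set size in kilobytes from the per-mm counters above.
 * The helper name is an illustrative assumption.
 */
static inline unsigned long example_task_rss_kb(struct mm_struct *mm)
{
	/* get_mm_rss() returns pages; convert to KiB via PAGE_SHIFT. */
	return get_mm_rss(mm) << (PAGE_SHIFT - 10);
}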
#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
}
#endif
  
  /*
 * A callback you can register to apply pressure to ageable caches.
 *
 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
 * look through the least-recently-used 'nr_to_scan' entries and
 * attempt to free them up.  It should return the number of objects
 * which remain in the cache.  If it returns -1, it means it cannot do
 * any scanning at this time (eg. there is a risk of deadlock).
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
 *
 * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
 * querying the cache size, so a fastpath for that case is appropriate.
 */
struct shrinker {
	int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
	int seeks;	/* seeks to recreate an obj */

  	/* These are for internal use */
  	struct list_head list;
  	long nr;	/* objs pending delete */
  };
  #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
  extern void register_shrinker(struct shrinker *);
  extern void unregister_shrinker(struct shrinker *);
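/*
 * Hypothetical usage sketch (not part of the original header): a minimal
 * shrinker following the contract documented above.  The cache it trims,
 * the object counter and all names here are illustrative assumptions.
 */
static int example_cache_objects;	/* objects currently in the cache */

static int example_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan == 0)
		return example_cache_objects;	/* just report the size */

	if (nr_to_scan > example_cache_objects)
		nr_to_scan = example_cache_objects;

	/* drop nr_to_scan least-recently-used entries of the cache here */
	example_cache_objects -= nr_to_scan;

	return example_cache_objects;		/* objects remaining */
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) on init,
 * unregister_shrinker(&example_shrinker) on teardown. */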

  int vma_wants_writenotify(struct vm_area_struct *vma);

  extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
  			       spinlock_t **ptl);
  static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
  				    spinlock_t **ptl)
  {
  	pte_t *ptep;
  	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
  	return ptep;
  }

  #ifdef __PAGETABLE_PUD_FOLDED
  static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
  						unsigned long address)
  {
  	return 0;
  }
  #else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
  #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTLOCKS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->private, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else	/* !USE_SPLIT_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

  static inline void pgtable_page_ctor(struct page *page)
  {
  	pte_lock_init(page);
  	inc_zone_page_state(page, NR_PAGETABLE);
  }
  
  static inline void pgtable_page_dtor(struct page *page)
  {
  	pte_lock_deinit(page);
  	dec_zone_page_state(page, NR_PAGETABLE);
  }
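/*
 * Hypothetical usage sketch (not part of the original header): how an
 * architecture's pte page allocation and freeing are expected to bracket
 * the page with pgtable_page_ctor()/pgtable_page_dtor().  The helper names
 * and GFP flags are illustrative assumptions.
 */
static inline struct page *example_pte_alloc_one(struct mm_struct *mm,
						 unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (page)
		pgtable_page_ctor(page);
	return page;
}

static inline void example_pte_free(struct mm_struct *mm, struct page *page)
{
	pgtable_page_dtor(page);
	__free_page(page);
}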
#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
  		NULL: pte_offset_kernel(pmd, address))
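/*
 * Hypothetical usage sketch (not part of the original header): inspecting a
 * single pte under its lock with pte_offset_map_lock()/pte_unmap_unlock().
 * The helper name is an illustrative assumption; callers would normally
 * hold mmap_sem and have walked pgd/pud/pmd to reach this pmd.
 */
static inline int example_pte_present(struct mm_struct *mm, pmd_t *pmd,
				      unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	int present;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	present = pte_present(*pte);
	pte_unmap_unlock(pte, ptl);
	return present;
}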
  
  extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
  #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
   * zones, allocate the backing mem_map and account for memory holes in a more
   * architecture independent manner. This is a substitute for creating the
   * zone_sizes[] and zholes_size[] arrays and passing them to
   * free_area_init_node()
   *
 * An architecture is expected to register the ranges of page frames backed
 * by physical memory with add_active_range() before calling
 * free_area_init_nodes(), passing in the PFN at which each zone ends. In its
 * most basic usage, an architecture is expected to do something like:
   *
   * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
   * 							 max_highmem_pfn};
   * for_each_valid_physical_page_range()
   * 	add_active_range(node_id, start_pfn, end_pfn)
   * free_area_init_nodes(max_zone_pfns);
   *
   * If the architecture guarantees that there are no holes in the ranges
 * registered with add_active_range(), free_bootmem_with_active_regions()
   * will call free_bootmem_node() for each registered physical page range.
   * Similarly sparse_memory_present_with_active_regions() calls
   * memory_present() for each range when SPARSEMEM is enabled.
   *
   * See mm/page_alloc.c for more information on each function exposed by
   * CONFIG_ARCH_POPULATES_NODE_MAP
   */
  extern void free_area_init_nodes(unsigned long *max_zone_pfn);
  extern void add_active_range(unsigned int nid, unsigned long start_pfn,
  					unsigned long end_pfn);
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_all_active_ranges(void);
void sort_node_map(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
				   int nr_range, int nid);
u64 __init find_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit);
void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
				 u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
  #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
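/*
 * Hypothetical usage sketch (not part of the original header), following the
 * CONFIG_ARCH_POPULATES_NODE_MAP recipe described in the comment above.  The
 * single node, the 2GB worth of PFNs and the function name are illustrative
 * assumptions; a real architecture takes these values from firmware/boot data.
 */
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
static void __init example_arch_zone_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
		[ZONE_NORMAL] = 0x80000,	/* 2GB with 4K pages */
	};

	/* one node covering PFNs [0, 0x80000) */
	add_active_range(0, 0, 0x80000);
	free_area_init_nodes(max_zone_pfns);
}
#endif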
  
  #if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
      !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
  static inline int __early_pfn_to_nid(unsigned long pfn)
  {
  	return 0;
  }
  #else
  /* please see mm/page_alloc.c */
  extern int __meminit early_pfn_to_nid(unsigned long pfn);
  #ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  /* there is a per-arch backend function. */
  extern int __meminit __early_pfn_to_nid(unsigned long pfn);
  #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
  #endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern void calculate_zone_inactive_ratio(struct zone *zone);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

  /* prio_tree.c */
  void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
  void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
  void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
  struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
  	struct prio_tree_iter *iter);
  
  #define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
  	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
  		(vma = vma_prio_tree_next(vma, iter)); )
  
  static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
  					struct list_head *list)
  {
  	vma->shared.vm_set.parent = NULL;
  	list_add_tail(&vma->shared.vm_set.list, list);
  }
  
  /* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
  	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
  extern struct vm_area_struct *vma_merge(struct mm_struct *,
  	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
  	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
  	struct mempolicy *);
  extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
  extern int split_vma(struct mm_struct *,
  	struct vm_area_struct *, unsigned long addr, int new_below);
  extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
  extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
  	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
  extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
  	unsigned long addr, unsigned long len, pgoff_t pgoff);
  extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
  #ifdef CONFIG_PROC_FS
  /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
  extern void added_exe_file_vma(struct mm_struct *mm);
  extern void removed_exe_file_vma(struct mm_struct *mm);
  #else
  static inline void added_exe_file_vma(struct mm_struct *mm)
  {}
  
  static inline void removed_exe_file_vma(struct mm_struct *mm)
  {}
  #endif /* CONFIG_PROC_FS */
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);
  
  extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
  
  extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
	unsigned int vm_flags, unsigned long pgoff);
  
  static inline unsigned long do_mmap(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long offset)
  {
  	unsigned long ret = -EINVAL;
  	if ((offset + PAGE_ALIGN(len)) < offset)
  		goto out;
  	if (!(offset & ~PAGE_MASK))
  		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
  out:
  	return ret;
  }
  
  extern int do_munmap(struct mm_struct *, unsigned long, size_t);
  
  extern unsigned long do_brk(unsigned long, unsigned long);
  
  /* filemap.c */
  extern unsigned long page_unuse(struct page *);
  extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
  
  /* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
  
  /* mm/page-writeback.c */
  int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);
  
  /* readahead.c */
  #define VM_MAX_READAHEAD	128	/* kbytes */
  #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);
  
  void page_cache_sync_readahead(struct address_space *mapping,
  			       struct file_ra_state *ra,
  			       struct file *filp,
  			       pgoff_t offset,
  			       unsigned long size);
  
  void page_cache_async_readahead(struct address_space *mapping,
  				struct file_ra_state *ra,
  				struct file *filp,
  				struct page *pg,
  				pgoff_t offset,
  				unsigned long size);
unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);
  
  /* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) do { } while (0)
#endif
  extern int expand_stack_downwards(struct vm_area_struct *vma,
  				  unsigned long address);
  
  /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
  extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
  extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
  					     struct vm_area_struct **pprev);
  
  /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
     NULL if none.  Assume start_addr < end_addr. */
  static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
  {
  	struct vm_area_struct * vma = find_vma(mm,start_addr);
  
  	if (vma && end_addr <= vma->vm_start)
  		vma = NULL;
  	return vma;
  }
  
  static inline unsigned long vma_pages(struct vm_area_struct *vma)
  {
  	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
  }
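/*
 * Hypothetical usage sketch (not part of the original header): testing
 * whether an address is covered by some VMA using find_vma().  The helper
 * name is an illustrative assumption; mmap_sem must be held by the caller.
 */
static inline int example_addr_is_mapped(struct mm_struct *mm,
					 unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* find_vma() only guarantees addr < vm_end; check vm_start too. */
	return vma && addr >= vma->vm_start;
}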
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
  #else
  static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
  {
  	return __pgprot(0);
  }
  #endif
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
  int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
  			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);

  struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_MLOCK	0x40	/* mark page as mlocked */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
  			void *data);
  extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
  			       unsigned long size, pte_fn_t fn, void *data);
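/*
 * Hypothetical usage sketch (not part of the original header): a pte_fn_t
 * callback for apply_to_page_range() that counts present ptes in a range.
 * The names used here are illustrative assumptions.
 */
static int example_count_present_pte(pte_t *pte, pgtable_t token,
				     unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* non-zero would abort the walk */
}
/*
 * Typical call:
 *	unsigned long n = 0;
 *	apply_to_page_range(mm, start, size, example_count_present_pte, &n);
 */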
#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
  			unsigned long flags, struct file *file, long pages)
  {
  }
  #endif /* CONFIG_PROC_FS */
  #ifdef CONFIG_DEBUG_PAGEALLOC
  extern int debug_pagealloc_enabled;
  
  extern void kernel_map_pages(struct page *page, int numpages, int enable);
  
  static inline void enable_debug_pagealloc(void)
  {
  	debug_pagealloc_enabled = 1;
  }
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline void enable_debug_pagealloc(void)
{
}
  #ifdef CONFIG_HIBERNATION
  static inline bool kernel_page_present(struct page *page) { return true; }
  #endif /* CONFIG_HIBERNATION */
  #endif
  
  extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
  #ifdef	__HAVE_ARCH_GATE_AREA
  int in_gate_area_no_task(unsigned long addr);
  int in_gate_area(struct task_struct *task, unsigned long addr);
  #else
  int in_gate_area_no_task(unsigned long addr);
  #define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
  #endif	/* __HAVE_ARCH_GATE_AREA */
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

  void sparse_mem_maps_populate_node(struct page **map_map,
  				   unsigned long pnum_begin,
  				   unsigned long pnum_end,
  				   unsigned long map_count,
  				   int nodeid);
struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
						unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);


enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
};
extern void memory_failure(unsigned long pfn, int trapno);
extern int __memory_failure(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
extern int soft_offline_page(struct page *page, int flags);
  #ifdef CONFIG_MEMORY_FAILURE
  int is_hwpoison_address(unsigned long addr);
  #else
  static inline int is_hwpoison_address(unsigned long addr)
  {
  	return 0;
  }
  #endif

extern void dump_page(struct page *page);
  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
  extern void clear_huge_page(struct page *page,
  			    unsigned long addr,
  			    unsigned int pages_per_huge_page);
  extern void copy_user_huge_page(struct page *dst, struct page *src,
  				unsigned long addr, struct vm_area_struct *vma,
  				unsigned int pages_per_huge_page);
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
  #endif /* __KERNEL__ */
  #endif /* _LINUX_MM_H */