include/linux/rmap.h
  #ifndef _LINUX_RMAP_H
  #define _LINUX_RMAP_H
  /*
   * Declarations for Reverse Mapping functions in mm/rmap.c
   */
  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/spinlock.h>
  #include <linux/memcontrol.h>
  
  /*
   * The anon_vma heads a list of private "related" vmas, to scan if
   * an anonymous page pointing to this anon_vma needs to be unmapped:
   * the vmas on the list will be related by forking, or by splitting.
   *
   * Since vmas come and go as they are split and merged (particularly
   * in mprotect), the mapping field of an anonymous page cannot point
   * directly to a vma: instead it points to an anon_vma, on whose list
   * the related vmas can be easily linked or unlinked.
   *
   * After unlinking the last vma on the list, we must garbage collect
   * the anon_vma object itself: we're guaranteed no page can be
   * pointing to this anon_vma once its vma list is empty.
   */
  struct anon_vma {
  	spinlock_t lock;	/* Serialize access to vma list */
  	struct anon_vma *root;	/* Root of this anon_vma tree */
  #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
  
  	/*
  	 * The external_refcount is taken by either KSM or page migration
  	 * to take a reference to an anon_vma when there is no
  	 * guarantee that the vma or its page tables will exist for
  	 * the duration of the operation. A caller that takes
  	 * the reference is responsible for cleaning up the
  	 * anon_vma if it is the last user on release.
  	 */
  	atomic_t external_refcount;
  #endif
  	/*
  	 * NOTE: the LSB of the head.next is set by
  	 * mm_take_all_locks() _after_ taking the above lock. So the
  	 * head must only be read/written after taking the above lock
  	 * to be sure to see a valid next pointer. The LSB bit itself
  	 * is serialized by a system wide lock only visible to
  	 * mm_take_all_locks() (mm_all_locks_mutex).
  	 */
  	struct list_head head;	/* Chain of private "related" vmas */
  };
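
  /*
   * Illustrative sketch (editor's example, compiled out; not part of the
   * original header): the garbage-collection rule described above.  Once
   * the last vma has been unlinked from an anon_vma's list, no page can
   * still point at it, so it may be freed.  "example_unlink" is a
   * hypothetical caller; the real logic lives in mm/rmap.c.
   */
  #if 0
  static void example_unlink(struct anon_vma *anon_vma)
  {
  	int empty;

  	anon_vma_lock(anon_vma);
  	/* ... list_del() this vma's entry from anon_vma->head ... */
  	empty = list_empty(&anon_vma->head);
  	anon_vma_unlock(anon_vma);

  	if (empty)
  		anon_vma_free(anon_vma);
  }
  #endif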
  
  /*
   * The copy-on-write semantics of fork mean that an anon_vma
   * can become associated with multiple processes. Furthermore,
   * each child process will have its own anon_vma, where new
   * pages for that process are instantiated.
   *
   * This structure allows us to find the anon_vmas associated
   * with a VMA, or the VMAs associated with an anon_vma.
   * The "same_vma" list contains the anon_vma_chains linking
   * all the anon_vmas associated with this VMA.
   * The "same_anon_vma" list contains the anon_vma_chains
   * which link all the VMAs associated with this anon_vma.
   */
  struct anon_vma_chain {
  	struct vm_area_struct *vma;
  	struct anon_vma *anon_vma;
  	struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
  	struct list_head same_anon_vma;	/* locked by anon_vma->lock */
  };
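
  /*
   * Illustrative sketch (editor's example, compiled out): traversing the
   * two list memberships above.  "same_vma" entries hang off
   * vma->anon_vma_chain and enumerate the anon_vmas a vma is linked to;
   * "same_anon_vma" entries hang off anon_vma->head and enumerate the
   * vmas that may map this anon_vma's pages.  The loop bodies call
   * hypothetical helpers; the appropriate locks (mmap_sem, anon_vma
   * lock) are assumed to be held.
   */
  #if 0
  static void example_walk_chains(struct vm_area_struct *vma,
  				struct anon_vma *anon_vma)
  {
  	struct anon_vma_chain *avc;

  	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
  		do_something_with_anon_vma(avc->anon_vma);	/* hypothetical */

  	list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
  		do_something_with_vma(avc->vma);		/* hypothetical */
  }
  #endif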
  
  #ifdef CONFIG_MMU
  #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
  static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
  {
  	atomic_set(&anon_vma->external_refcount, 0);
  }
  static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
  {
  	return atomic_read(&anon_vma->external_refcount);
  }
  
  static inline void get_anon_vma(struct anon_vma *anon_vma)
  {
  	atomic_inc(&anon_vma->external_refcount);
  }
  
  void drop_anon_vma(struct anon_vma *);
  #else
  static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
  {
  }
  static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
  {
  	return 0;
  }
  
  static inline void get_anon_vma(struct anon_vma *anon_vma)
  {
  }
  
  static inline void drop_anon_vma(struct anon_vma *anon_vma)
  {
  }
  #endif /* CONFIG_KSM || CONFIG_MIGRATION */
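
  /*
   * Illustrative sketch (editor's example, compiled out): the pattern the
   * external_refcount comment above is describing.  Page migration (or
   * KSM) pins the anon_vma so it cannot be freed while the vma or its
   * page tables might go away under it.  "example_migrate_anon_page" is a
   * hypothetical caller, loosely modelled on mm/migrate.c.
   */
  #if 0
  static void example_migrate_anon_page(struct page *page)
  {
  	struct anon_vma *anon_vma;

  	anon_vma = page_lock_anon_vma(page);
  	if (!anon_vma)
  		return;
  	get_anon_vma(anon_vma);		/* hold it across the operation */
  	page_unlock_anon_vma(anon_vma);

  	/* ... unmap the page, copy it, remove migration ptes ... */

  	drop_anon_vma(anon_vma);	/* frees the anon_vma if we were last */
  }
  #endif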

  static inline struct anon_vma *page_anon_vma(struct page *page)
  {
  	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
  					    PAGE_MAPPING_ANON)
  		return NULL;
  	return page_rmapping(page);
  }
  static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
  {
  	struct anon_vma *anon_vma = vma->anon_vma;
  	if (anon_vma)
  		spin_lock(&anon_vma->root->lock);
  }
  static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
  {
  	struct anon_vma *anon_vma = vma->anon_vma;
  	if (anon_vma)
  		spin_unlock(&anon_vma->root->lock);
  }
  static inline void anon_vma_lock(struct anon_vma *anon_vma)
  {
  	spin_lock(&anon_vma->root->lock);
  }
  
  static inline void anon_vma_unlock(struct anon_vma *anon_vma)
  {
  	spin_unlock(&anon_vma->root->lock);
  }
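
  /*
   * Illustrative sketch (editor's example, compiled out): all of the
   * helpers above take the lock of the *root* anon_vma, so an entire
   * anon_vma tree is serialized by one spinlock.  A typical caller
   * brackets list manipulation like this (loosely mirroring
   * anon_vma_chain_link() in mm/rmap.c):
   */
  #if 0
  static void example_chain_link(struct vm_area_struct *vma,
  			       struct anon_vma_chain *avc,
  			       struct anon_vma *anon_vma)
  {
  	avc->vma = vma;
  	avc->anon_vma = anon_vma;
  	list_add(&avc->same_vma, &vma->anon_vma_chain);

  	anon_vma_lock(anon_vma);
  	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
  	anon_vma_unlock(anon_vma);
  }
  #endif
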
  /*
   * anon_vma helper functions.
   */
  void anon_vma_init(void);	/* create anon_vma_cachep */
  int  anon_vma_prepare(struct vm_area_struct *);
  void unlink_anon_vmas(struct vm_area_struct *);
  int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
  int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
  void __anon_vma_link(struct vm_area_struct *);
  void anon_vma_free(struct anon_vma *);
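
  /*
   * Illustrative sketch (editor's example, compiled out): how the
   * declarations above fit together at fork time.  anon_vma_clone()
   * links a new vma to all of an existing vma's anon_vmas (used when a
   * vma is split or copied); anon_vma_fork() additionally gives the
   * child its own anon_vma for pages instantiated after the fork.
   * "example_dup_vma" is hypothetical; the real caller is dup_mmap().
   */
  #if 0
  static int example_dup_vma(struct vm_area_struct *child,
  			   struct vm_area_struct *parent)
  {
  	/* anon_vma_fork() clones the parent's chains, then adds its own */
  	if (anon_vma_fork(child, parent))
  		return -ENOMEM;
  	return 0;
  }
  #endif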

  static inline void anon_vma_merge(struct vm_area_struct *vma,
  				  struct vm_area_struct *next)
  {
  	VM_BUG_ON(vma->anon_vma != next->anon_vma);
  	unlink_anon_vmas(next);
  }
  /*
   * rmap interfaces called when adding or removing pte of page
   */
  void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
  			   unsigned long, int);
  void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  void page_add_file_rmap(struct page *);
  void page_remove_rmap(struct page *);
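
  /*
   * Illustrative sketch (editor's example, compiled out): a heavily
   * abridged anonymous-fault path using the calls above.  The vma gets
   * an anon_vma on demand, and the new page is added to the reverse map
   * once its pte has been set up.  "example_anon_fault" is hypothetical;
   * see do_anonymous_page() in mm/memory.c for the real thing.
   */
  #if 0
  static int example_anon_fault(struct vm_area_struct *vma,
  			      unsigned long address, struct page *page)
  {
  	if (anon_vma_prepare(vma))	/* ensure vma->anon_vma exists */
  		return VM_FAULT_OOM;

  	/* ... set up the pte under the page table lock ... */
  	page_add_new_anon_rmap(page, vma, address);
  	return 0;
  }
  #endif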

  void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
  			    unsigned long);
  void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
  				unsigned long);
  static inline void page_dup_rmap(struct page *page)
  {
  	atomic_inc(&page->_mapcount);
  }
  
  /*
   * Called from mm/vmscan.c to handle paging out
   */
  int page_referenced(struct page *, int is_locked,
  			struct mem_cgroup *cnt, unsigned long *vm_flags);
  int page_referenced_one(struct page *, struct vm_area_struct *,
  	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
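
  /*
   * Illustrative sketch (editor's example, compiled out): how reclaim
   * might use page_referenced().  Passing a NULL mem_cgroup counts
   * references from all cgroups; vm_flags returns the flags of the vmas
   * that referenced the page, which lets the caller spot VM_LOCKED
   * mappings.  "example_was_referenced" is hypothetical and abridged
   * from the logic in mm/vmscan.c.
   */
  #if 0
  static int example_was_referenced(struct page *page)
  {
  	unsigned long vm_flags;
  	int referenced;

  	referenced = page_referenced(page, 0, NULL, &vm_flags);
  	if (vm_flags & VM_LOCKED)
  		return 1;		/* mlocked somewhere: do not reclaim */
  	return referenced > 0;
  }
  #endif
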
  enum ttu_flags {
  	TTU_UNMAP = 0,			/* unmap mode */
  	TTU_MIGRATION = 1,		/* migration mode */
  	TTU_MUNLOCK = 2,		/* munlock mode */
  	TTU_ACTION_MASK = 0xff,
  
  	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
  	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
  	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
  };
  #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
  
  int try_to_unmap(struct page *, enum ttu_flags flags);
  int try_to_unmap_one(struct page *, struct vm_area_struct *,
  			unsigned long address, enum ttu_flags flags);
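
  /*
   * Illustrative sketch (editor's example, compiled out): combining an
   * action with modifier bits from ttu_flags above.  Migration unmaps a
   * page while ignoring mlock and reference bits, then checks the SWAP_*
   * return codes defined at the bottom of this header.
   * "example_unmap_for_migration" is hypothetical; mm/migrate.c does the
   * real call.
   */
  #if 0
  static int example_unmap_for_migration(struct page *page)
  {
  	int ret;

  	ret = try_to_unmap(page,
  			TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
  	return ret == SWAP_SUCCESS ? 0 : -EAGAIN;
  }
  #endif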
  
  /*
   * Called from mm/filemap_xip.c to unmap empty zero page
   */
  pte_t *page_check_address(struct page *, struct mm_struct *,
  				unsigned long, spinlock_t **, int);
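
  /*
   * Illustrative sketch (editor's example, compiled out):
   * page_check_address() returns the pte mapping @page at @address with
   * the page table lock held (or NULL), so the caller must drop it with
   * pte_unmap_unlock().  "example_probe_mapping" is hypothetical.
   */
  #if 0
  static int example_probe_mapping(struct page *page, struct mm_struct *mm,
  				 unsigned long address)
  {
  	spinlock_t *ptl;
  	pte_t *pte;

  	pte = page_check_address(page, mm, address, &ptl, 0);
  	if (!pte)
  		return 0;
  	/* ... inspect or modify the pte here ... */
  	pte_unmap_unlock(pte, ptl);
  	return 1;
  }
  #endif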
  
  /*
   * Used by swapoff to help locate where page is expected in vma.
   */
  unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  /*
   * Cleans the PTEs of shared mappings.
   * (and since clean PTEs should also be readonly, write protects them too)
   *
   * returns the number of cleaned PTEs.
   */
  int page_mkclean(struct page *);
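
  /*
   * Illustrative sketch (editor's example, compiled out): page_mkclean()
   * is what makes "clean" imply "write-protected" for shared mappings,
   * so the next write faults and re-dirties the page.  Hypothetical
   * caller, loosely modelled on clear_page_dirty_for_io():
   */
  #if 0
  static int example_clean_before_writeback(struct page *page)
  {
  	int cleaned = 0;

  	if (page_mapped(page))
  		cleaned = page_mkclean(page);	/* write-protects the ptes */
  	return cleaned;				/* number of ptes cleaned */
  }
  #endif
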
  /*
   * called in munlock()/munmap() path to check for other vmas holding
   * the page mlocked.
   */
  int try_to_munlock(struct page *);

  /*
   * Called by memory-failure.c to kill processes.
   */
  struct anon_vma *page_lock_anon_vma(struct page *page);
  void page_unlock_anon_vma(struct anon_vma *anon_vma);
  int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

  /*
   * Called by migrate.c to remove migration ptes, but might be used more later.
   */
  int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
  		struct vm_area_struct *, unsigned long, void *), void *arg);
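
  /*
   * Illustrative sketch (editor's example, compiled out): the callback
   * shape rmap_walk() expects.  It is called once for each (vma, address)
   * at which the page may be mapped and should return SWAP_AGAIN to keep
   * walking.  "example_rmap_one" is hypothetical; the migration-pte
   * removal code in mm/migrate.c is the real user.
   */
  #if 0
  static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
  			    unsigned long address, void *arg)
  {
  	/* ... look at how @page is mapped at @address in @vma ... */
  	return SWAP_AGAIN;	/* continue the walk */
  }

  /* invoked as: rmap_walk(page, example_rmap_one, NULL); */
  #endif
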
  #else	/* !CONFIG_MMU */
  
  #define anon_vma_init()		do {} while (0)
  #define anon_vma_prepare(vma)	(0)
  #define anon_vma_link(vma)	do {} while (0)
  static inline int page_referenced(struct page *page, int is_locked,
  				  struct mem_cgroup *cnt,
  				  unsigned long *vm_flags)
  {
  	*vm_flags = 0;
  	return 0;
  }
  #define try_to_unmap(page, refs) SWAP_FAIL

  static inline int page_mkclean(struct page *page)
  {
  	return 0;
  }
  #endif	/* CONFIG_MMU */
  
  /*
   * Return values of try_to_unmap
   */
  #define SWAP_SUCCESS	0
  #define SWAP_AGAIN	1
  #define SWAP_FAIL	2
  #define SWAP_MLOCK	3
  
  #endif	/* _LINUX_RMAP_H */