include/linux/rmap.h
  #ifndef _LINUX_RMAP_H
  #define _LINUX_RMAP_H
  /*
   * Declarations for Reverse Mapping functions in mm/rmap.c
   */
  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/mutex.h>
  #include <linux/memcontrol.h>
  
  /*
   * The anon_vma heads a list of private "related" vmas, to scan if
   * an anonymous page pointing to this anon_vma needs to be unmapped:
   * the vmas on the list will be related by forking, or by splitting.
   *
   * Since vmas come and go as they are split and merged (particularly
   * in mprotect), the mapping field of an anonymous page cannot point
   * directly to a vma: instead it points to an anon_vma, on whose list
   * the related vmas can be easily linked or unlinked.
   *
   * After unlinking the last vma on the list, we must garbage collect
   * the anon_vma object itself: we're guaranteed no page can be
   * pointing to this anon_vma once its vma list is empty.
   */
  struct anon_vma {
  	struct anon_vma *root;	/* Root of this anon_vma tree */
  	struct mutex mutex;	/* Serialize access to vma list */
  	/*
  	 * The refcount is taken on an anon_vma when there is no
  	 * guarantee that the vma or page tables will exist for
  	 * the duration of the operation. A caller that takes
  	 * the reference is responsible for cleaning up the
  	 * anon_vma if they are the last user on release.
  	 */
  	atomic_t refcount;
  	/*
  	 * NOTE: the LSB of the head.next is set by
  	 * mm_take_all_locks() _after_ taking the above lock. So the
  	 * head must only be read/written after taking the above lock
  	 * to be sure to see a valid next pointer. The LSB bit itself
  	 * is serialized by a system wide lock only visible to
  	 * mm_take_all_locks() (mm_all_locks_mutex).
  	 */
  	struct list_head head;	/* Chain of private "related" vmas */
  };
  
  /*
   * The copy-on-write semantics of fork mean that an anon_vma
   * can become associated with multiple processes. Furthermore,
   * each child process will have its own anon_vma, where new
   * pages for that process are instantiated.
   *
   * This structure allows us to find the anon_vmas associated
   * with a VMA, or the VMAs associated with an anon_vma.
   * The "same_vma" list contains the anon_vma_chains linking
   * all the anon_vmas associated with this VMA.
   * The "same_anon_vma" list contains the anon_vma_chains
   * which link all the VMAs associated with this anon_vma.
   */
  struct anon_vma_chain {
  	struct vm_area_struct *vma;
  	struct anon_vma *anon_vma;
  	struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
  	struct list_head same_anon_vma;	/* locked by anon_vma->mutex */
  };
  
  #ifdef CONFIG_MMU
  static inline void get_anon_vma(struct anon_vma *anon_vma)
  {
  	atomic_inc(&anon_vma->refcount);
  }
  void __put_anon_vma(struct anon_vma *anon_vma);
  
  static inline void put_anon_vma(struct anon_vma *anon_vma)
  {
  	if (atomic_dec_and_test(&anon_vma->refcount))
  		__put_anon_vma(anon_vma);
  }
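  
  /*
   * Illustrative sketch, not part of the original header: the typical
   * get_anon_vma()/put_anon_vma() pairing described by the refcount
   * comment in struct anon_vma above.  A caller that must sleep, and so
   * cannot rely on the vma or page tables staying around, pins the
   * anon_vma with a reference and drops it when done; the last put
   * frees the structure via __put_anon_vma().  The function name below
   * is hypothetical.
   */
  static inline void example_pin_anon_vma_across_sleep(struct anon_vma *anon_vma)
  {
  	get_anon_vma(anon_vma);		/* pin: anon_vma cannot be freed */
  	/* ... blocking work that may outlive the vma list ... */
  	put_anon_vma(anon_vma);		/* unpin: freed here if we were the last user */
  }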

  static inline struct anon_vma *page_anon_vma(struct page *page)
  {
  	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
  					    PAGE_MAPPING_ANON)
  		return NULL;
  	return page_rmapping(page);
  }
  static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
  {
  	struct anon_vma *anon_vma = vma->anon_vma;
  	if (anon_vma)
  		mutex_lock(&anon_vma->root->mutex);
  }
  static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
  {
  	struct anon_vma *anon_vma = vma->anon_vma;
  	if (anon_vma)
  		mutex_unlock(&anon_vma->root->mutex);
  }
  static inline void anon_vma_lock(struct anon_vma *anon_vma)
  {
  	mutex_lock(&anon_vma->root->mutex);
  }
  
  static inline void anon_vma_unlock(struct anon_vma *anon_vma)
  {
  	mutex_unlock(&anon_vma->root->mutex);
  }
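  
  /*
   * Illustrative sketch, not part of the original header: visiting every
   * VMA attached to an anon_vma through the anon_vma_chain lists
   * described above, roughly as the rmap walkers in mm/rmap.c do.  The
   * root mutex must be held across the traversal, which is what
   * anon_vma_lock()/anon_vma_unlock() provide.  The function name and
   * the visit() callback are hypothetical.
   */
  static inline void example_for_each_vma_of_anon_vma(struct anon_vma *anon_vma,
  				void (*visit)(struct vm_area_struct *))
  {
  	struct anon_vma_chain *avc;
  
  	anon_vma_lock(anon_vma);
  	list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
  		visit(avc->vma);
  	anon_vma_unlock(anon_vma);
  }
  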
  /*
   * anon_vma helper functions.
   */
  void anon_vma_init(void);	/* create anon_vma_cachep */
  int  anon_vma_prepare(struct vm_area_struct *);
  void unlink_anon_vmas(struct vm_area_struct *);
  int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
  int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
  void __anon_vma_link(struct vm_area_struct *);
  static inline void anon_vma_merge(struct vm_area_struct *vma,
  				  struct vm_area_struct *next)
  {
  	VM_BUG_ON(vma->anon_vma != next->anon_vma);
  	unlink_anon_vmas(next);
  }
  struct anon_vma *page_get_anon_vma(struct page *page);
  /*
   * rmap interfaces called when adding or removing pte of page
   */
  void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
  			   unsigned long, int);
  void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  void page_add_file_rmap(struct page *);
  void page_remove_rmap(struct page *);

  void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
  			    unsigned long);
  void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
  				unsigned long);
  static inline void page_dup_rmap(struct page *page)
  {
  	atomic_inc(&page->_mapcount);
  }
  
  /*
   * Called from mm/vmscan.c to handle paging out
   */
  int page_referenced(struct page *, int is_locked,
  			struct mem_cgroup *cnt, unsigned long *vm_flags);
  int page_referenced_one(struct page *, struct vm_area_struct *,
  	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
  enum ttu_flags {
  	TTU_UNMAP = 0,			/* unmap mode */
  	TTU_MIGRATION = 1,		/* migration mode */
  	TTU_MUNLOCK = 2,		/* munlock mode */
  	TTU_ACTION_MASK = 0xff,
  
  	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
  	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
  	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
  };
  #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
  bool is_vma_temporary_stack(struct vm_area_struct *vma);
  int try_to_unmap(struct page *, enum ttu_flags flags);
  int try_to_unmap_one(struct page *, struct vm_area_struct *,
  			unsigned long address, enum ttu_flags flags);
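  
  /*
   * Illustrative sketch, not part of the original header: ttu_flags
   * combine an action in the low byte with modifier bits above it, and
   * TTU_ACTION() recovers the action.  The migration code, for example,
   * asks for migration-mode unmapping while ignoring mlock.  The
   * function name below is hypothetical.
   */
  static inline int example_unmap_for_migration(struct page *page)
  {
  	/* TTU_ACTION(TTU_MIGRATION | TTU_IGNORE_MLOCK) == TTU_MIGRATION */
  	return try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK);
  }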
  
  /*
   * Called from mm/filemap_xip.c to unmap empty zero page
   */
  pte_t *__page_check_address(struct page *, struct mm_struct *,
  				unsigned long, spinlock_t **, int);

  static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
  					unsigned long address,
  					spinlock_t **ptlp, int sync)
  {
  	pte_t *ptep;
  
  	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
  						       ptlp, sync));
  	return ptep;
  }
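  
  /*
   * Illustrative sketch, not part of the original header: how callers
   * such as page_referenced_one() in mm/rmap.c use page_check_address().
   * On success the pte is mapped and its page table lock is held, so the
   * caller must finish with pte_unmap_unlock().  The function name is
   * hypothetical; pte_young() merely stands in for whatever the caller
   * wants to do with the pte.
   */
  static inline int example_pte_is_young(struct page *page,
  				struct mm_struct *mm, unsigned long address)
  {
  	spinlock_t *ptl;
  	pte_t *pte;
  	int young;
  
  	pte = page_check_address(page, mm, address, &ptl, 0);
  	if (!pte)
  		return 0;		/* page not mapped at this address */
  	young = pte_young(*pte);
  	pte_unmap_unlock(pte, ptl);
  	return young;
  }
  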
  /*
   * Used by swapoff to help locate where page is expected in vma.
   */
  unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  /*
   * Cleans the PTEs of shared mappings.
   * (and since clean PTEs should also be readonly, write protects them too)
   *
   * returns the number of cleaned PTEs.
   */
  int page_mkclean(struct page *);
  /*
   * called in munlock()/munmap() path to check for other vmas holding
   * the page mlocked.
   */
  int try_to_munlock(struct page *);

  /*
   * Called by memory-failure.c to kill processes.
   */
  struct anon_vma *page_lock_anon_vma(struct page *page);
  void page_unlock_anon_vma(struct anon_vma *anon_vma);
  int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

  /*
   * Called by migrate.c to remove migration ptes, but might be used more later.
   */
  int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
  		struct vm_area_struct *, unsigned long, void *), void *arg);
  #else	/* !CONFIG_MMU */
  
  #define anon_vma_init()		do {} while (0)
  #define anon_vma_prepare(vma)	(0)
  #define anon_vma_link(vma)	do {} while (0)
  static inline int page_referenced(struct page *page, int is_locked,
  				  struct mem_cgroup *cnt,
  				  unsigned long *vm_flags)
  {
  	*vm_flags = 0;
  	return 0;
  }
  #define try_to_unmap(page, refs) SWAP_FAIL

  static inline int page_mkclean(struct page *page)
  {
  	return 0;
  }
  #endif	/* CONFIG_MMU */
  
  /*
   * Return values of try_to_unmap
   */
  #define SWAP_SUCCESS	0
  #define SWAP_AGAIN	1
  #define SWAP_FAIL	2
  #define SWAP_MLOCK	3
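  
  /*
   * Illustrative sketch, not part of the original header: interpreting
   * the try_to_unmap() return codes above, roughly the decision
   * mm/vmscan.c makes when it asks whether a page may be reclaimed.
   * The function name is hypothetical; it would be handed the value
   * returned by try_to_unmap().
   */
  static inline int example_page_may_be_reclaimed(int ttu_ret)
  {
  	switch (ttu_ret) {
  	case SWAP_SUCCESS:
  		return 1;	/* every pte was removed */
  	case SWAP_AGAIN:	/* temporary failure, e.g. a racing mapper */
  	case SWAP_MLOCK:	/* page is mapped by a VM_LOCKED vma */
  	case SWAP_FAIL:		/* could not be unmapped */
  	default:
  		return 0;	/* keep the page for now */
  	}
  }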
  
  #endif	/* _LINUX_RMAP_H */