mm/rmap.c
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     i_pages lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>
#include <trace/events/tlb.h>
#include "internal.h"
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
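
/*
 * For reference: the common case is handled by an inline fast path in
 * include/linux/rmap.h, so __anon_vma_prepare() above only runs when
 * vma->anon_vma is still NULL (sketch; exact body may differ by version):
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *		return __anon_vma_prepare(vma);
 *	}
 */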
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}
/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse an existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of the anon_vma hierarchy to an endless linear chain in the
 * case of a constantly forking task. On the other hand, an anon_vma with
 * more than one child isn't reused even if there is no live vma, so the
 * rmap walker has a good chance of avoiding scanning the whole hierarchy
 * when it searches for where a page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
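
/*
 * Sketch of the fork-time caller (simplified from dup_mmap() in
 * kernel/fork.c; details vary by version):
 *
 *	tmp = vm_area_dup(mpnt);
 *	...
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 *
 * Each VMA copied into the child is hooked into the parent's anon_vma
 * hierarchy here, before the parent's page tables are copied.
 */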
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}
static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we have to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}
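
/*
 * Typical caller pattern (roughly what rmap_walk_anon() does via
 * rwc->anon_lock; sketch only):
 *
 *	anon_vma = page_lock_anon_vma_read(page);
 *	if (!anon_vma)
 *		return;			// page no longer mapped anonymous
 *	... walk anon_vma->rb_root for vmas overlapping the page ...
 *	page_unlock_anon_vma_read(anon_vma);
 */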
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
 * was dirty when it was unmapped, it is important that it is flushed before
 * any IO is initiated on the page, to prevent lost writes. Similarly, it
 * must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush to batch it */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
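
/*
 * How the batching pieces fit together (sketch; the real calls live in
 * try_to_unmap_one(), later in this file, and in mm/vmscan.c):
 *
 *	if (should_defer_flush(mm, flags)) {
 *		pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 *		set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
 *	} else {
 *		pteval = ptep_clear_flush(vma, address, pvmw.pte);
 *	}
 *
 * Reclaim then issues a single batched try_to_unmap_flush() (or
 * try_to_unmap_flush_dirty() before starting writeback) instead of one
 * IPI per unmapped page.
 */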
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}
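
/*
 * For reference, __vma_address() (mm/internal.h) computes the address as
 *
 *	vma->vm_start + ((page_to_pgoff(page) - vma->vm_pgoff) << PAGE_SHIFT)
 *
 * i.e. the inverse of linear_page_index(); the range check above then
 * rejects pages whose offset falls outside this vma.
 */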
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write. So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}
struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}
static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the vm_flags of the vmas which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
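
/*
 * The main consumer is reclaim: page_check_references() in mm/vmscan.c feeds
 * the returned count and *vm_flags into its activate-vs-reclaim decision,
 * e.g. (sketch):
 *
 *	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
 *					  &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		return PAGEREF_RECLAIM;
 */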
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
	 * that the page cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				min(vma->vm_end, address + page_size(page)));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
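
/*
 * page_mkclean() is how dirty-page accounting write-protects a page again:
 * clear_page_dirty_for_io() in mm/page-writeback.c calls it before writeback
 * so that a later store through any mapping faults and re-dirties the page,
 * roughly:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */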

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock (a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}
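
/*
 * Sketch of the do_swap_page() call that motivates this variant (simplified
 * from mm/memory.c; flags may carry RMAP_EXCLUSIVE when the faulting task
 * owns the swapped-in page outright):
 *
 *	do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 */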
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}
1125 1126 1127 |
/** * page_add_file_rmap - add pte mapping to a file page * @page: the page to add the mapping to |
e8b098fc5 mm: kernel-doc: a... |
1128 |
* @compound: charge the page as compound or small page |
1da177e4c Linux-2.6.12-rc2 |
1129 |
* |
b8072f099 [PATCH] mm: updat... |
1130 |
* The caller needs to hold the pte lock. |
1da177e4c Linux-2.6.12-rc2 |
1131 |
*/ |
dd78fedde rmap: support fil... |
1132 |
void page_add_file_rmap(struct page *page, bool compound) |
1da177e4c Linux-2.6.12-rc2 |
1133 |
{ |
dd78fedde rmap: support fil... |
1134 1135 1136 |
int i, nr = 1; VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); |
62cccb8c8 mm: simplify lock... |
1137 |
lock_page_memcg(page); |
dd78fedde rmap: support fil... |
1138 1139 1140 1141 1142 1143 1144 |
if (compound && PageTransHuge(page)) { for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { if (atomic_inc_and_test(&page[i]._mapcount)) nr++; } if (!atomic_inc_and_test(compound_mapcount_ptr(page))) goto out; |
99cb0dbd4 mm,thp: add read-... |
1145 1146 1147 1148 |
if (PageSwapBacked(page)) __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); else __inc_node_page_state(page, NR_FILE_PMDMAPPED); |
dd78fedde rmap: support fil... |
1149 |
} else { |
c8efc390c mm, rmap: fix fal... |
1150 1151 |
if (PageTransCompound(page) && page_mapping(page)) { VM_WARN_ON_ONCE(!PageLocked(page)); |
9a73f61bd thp, mlock: do no... |
1152 1153 1154 1155 |
SetPageDoubleMap(compound_head(page)); if (PageMlocked(page)) clear_page_mlock(compound_head(page)); } |
dd78fedde rmap: support fil... |
1156 1157 |
if (!atomic_inc_and_test(&page->_mapcount)) goto out; |
d69b042f3 memcg: add file-b... |
1158 |
} |
00f3ca2c2 mm: memcontrol: p... |
1159 |
__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); |
dd78fedde rmap: support fil... |
1160 |
out: |
62cccb8c8 mm: simplify lock... |
1161 |
unlock_page_memcg(page); |
1da177e4c Linux-2.6.12-rc2 |
1162 |
} |
static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		if (PageSwapBacked(page))
			__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
		else
			__dec_node_page_state(page, NR_FILE_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

out:
	unlock_page_memcg(page);
}
53f9263ba mm: rework mapcou... |
1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 |
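
/*
 * Sketch of the accounting symmetry (illustrative only, hypothetical
 * caller fragment): every page_add_file_rmap(page, c) is eventually paired
 * with a removal at the same granularity, so NR_FILE_MAPPED (and the
 * NR_SHMEM_PMDMAPPED/NR_FILE_PMDMAPPED counters for PMD mappings) returns
 * to its prior value once the last mapping is gone.
 */
#if 0
	page_add_file_rmap(page, true);		/* NR_FILE_PMDMAPPED++ */
	/* ... the mapping lives for a while ... */
	page_remove_rmap(page, true);		/* NR_FILE_PMDMAPPED-- */
#endif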

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}
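
/*
 * Worked example (illustrative note): consider a THP mapped once by PMD
 * whose first two subpages are also PTE-mapped, so PageDoubleMap is set.
 * Dropping the PMD mapping decrements every subpage _mapcount; the two
 * PTE-mapped subpages stay >= 0 and are excluded from nr, so with 2MB THP
 * on 4kB base pages (HPAGE_PMD_NR == 512):
 *
 *	nr = HPAGE_PMD_NR - (subpages still PTE-mapped) = 512 - 2 = 510
 *
 * NR_ANON_MAPPED is lowered by 510 and the page is queued for deferred
 * split.
 */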

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|dec}_node_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}
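
/*
 * A minimal caller sketch (illustrative, hypothetical fragment of an unmap
 * path): the pte is cleared under the pte lock, the dirty bit transferred,
 * then the rmap and page reference are dropped.  "ptent", "addr" and "pte"
 * are hypothetical locals; real callers defer the free through the
 * mmu_gather TLB layer rather than calling put_page() directly.
 */
#if 0
	ptent = ptep_get_and_clear_full(mm, addr, pte, full);
	if (pte_dirty(ptent))
		set_page_dirty(page);
	page_remove_rmap(page, false);
	put_page(page);
#endif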

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
	    is_zone_device_page(page) && !is_device_private_page(page))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_SPLIT_FREEZE, page);
	}

	/*
	 * For THP, we have to assume the worst case, i.e. pmd, for
	 * invalidation.  For hugetlb, it could be much worse if we need to
	 * do pud invalidation in the case of pmd sharing.
	 *
	 * Note that the page can not be freed in this function as call of
	 * try_to_unmap() must hold a reference on the page.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address,
				min(vma->vm_end, address + page_size(page)));
	if (PageHuge(page)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
			VM_BUG_ON_PAGE(PageHuge(page) ||
				       !PageTransCompound(page), page);

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (PageHuge(page)) {
			if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		if (IS_ENABLED(CONFIG_MIGRATION) &&
		    (flags & TTU_MIGRATION) &&
		    is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(page, 0);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here; it will synchronize
			 * against the special swap migration pte.
			 *
			 * The assignment to subpage above was computed from a
			 * swap PTE which results in an invalid pointer.
			 * Since only PAGE_SIZE pages can currently be
			 * migrated, just set it to page. This will need to be
			 * changed when hugepage migrations to device private
			 * memory are supported.
			 */
			subpage = page;
			goto discard;
		}

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				hugetlb_count_sub(compound_nr(page), mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
			swp_entry_t entry;
			pte_t swp_pte;

			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here; it will synchronize
			 * against the special swap migration pte.
			 */
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) !=
				     PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed page, thus it cannot
			 * be removed from the page cache and replaced by a new
			 * page before mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table to
			 * point at a new page while a device is still using
			 * this page.
			 *
			 * See Documentation/vm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() here, as it
		 * has been done above for all cases requiring it to happen
		 * under the page table lock before
		 * mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
	    && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}
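
/*
 * A minimal sketch of the pageout-path calling convention (illustrative,
 * hypothetical caller fragment): the caller holds the page lock and checks
 * the boolean result before trying to free the page.  "flags" and the
 * "keep"/"activate" labels are hypothetical.
 */
#if 0
	if (!trylock_page(page))
		goto keep;
	if (page_mapped(page) &&
	    !try_to_unmap(page, flags | TTU_BATCH_FLUSH)) {
		unlock_page(page);
		goto activate;
	}
#endif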

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */
void try_to_munlock(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	rmap_walk(page, &rwc);
}
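
/*
 * A minimal sketch of the munlock path (illustrative, hypothetical caller
 * fragment): the page is locked and isolated from the LRU first, since
 * try_to_munlock() insists on !PageLRU, then all other VMAs mapping it are
 * rechecked.
 */
#if 0
	lock_page(page);
	if (!isolate_lru_page(page)) {
		try_to_munlock(page);	/* clears PG_mlocked if unneeded */
		putback_lru_page(page);
	}
	unlock_page(page);
#endif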

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}
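
/*
 * A minimal sketch of a custom walk (illustrative, not part of this file):
 * rmap_walk() drives a caller-supplied rmap_one() over every VMA that maps
 * the page; returning false from the callback stops the walk early.
 * "count_one" and "count_mapping_vmas" are hypothetical names.
 */
#if 0
static bool count_one(struct page *page, struct vm_area_struct *vma,
		      unsigned long address, void *arg)
{
	(*(int *)arg)++;	/* page is mapped at 'address' in this vma */
	return true;		/* keep walking */
}

static int count_mapping_vmas(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one,
		.arg = &count,
		.anon_lock = page_lock_anon_vma_read,
	};

	rmap_walk(page, &rwc);	/* caller must hold the page lock */
	return count;
}
#endif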

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__page_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */
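
/*
 * A minimal sketch (illustrative, hypothetical fragment of a hugetlb COW
 * path): a freshly copied huge page is installed with the "new" variant,
 * which initialises the compound mapcount rather than incrementing it.
 * "haddr", "ptep", "new_page" and make_huge_pte() are assumed from the
 * caller's context (cf. mm/hugetlb.c).
 */
#if 0
	set_huge_pte_at(mm, haddr, ptep,
			make_huge_pte(vma, new_page, 1));
	hugepage_add_new_anon_rmap(new_page, vma, haddr);
#endif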