include/linux/rmap.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for clearing up the
         * anon_vma if they are the last user on release
         */
        atomic_t refcount;
        /*
         * Count of child anon_vmas and VMAs which point to this anon_vma.
         *
         * This counter is used for making decisions about reusing an
         * anon_vma instead of forking a new one. See the comments in
         * anon_vma_clone().
         */
        unsigned degree;

        struct anon_vma *parent;        /* Parent of this anon_vma */

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */

        /* Interval tree of private "related" vmas */
        struct rb_root_cached rb_root;
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_lock & page_table_lock */
        struct rb_node rb;              /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
        TTU_MIGRATION           = 0x1,  /* migration mode */
        TTU_MUNLOCK             = 0x2,  /* munlock mode */
        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
        TTU_IGNORE_HWPOISON     = 0x20, /* corrupted page is recoverable */
        TTU_BATCH_FLUSH         = 0x40, /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
        TTU_RMAP_LOCKED         = 0x80, /* do not grab rmap lock:
                                         * caller holds it */
        TTU_SPLIT_FREEZE        = 0x100, /* freeze pte under splitting thp */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}
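
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original header): page_get_anon_vma(), declared further down, takes
 * this refcount so the anon_vma stays alive even after the last vma is
 * unlinked; the caller drops the reference with put_anon_vma():
 *
 *      struct anon_vma *anon_vma = page_get_anon_vma(page);
 *
 *      if (anon_vma) {
 *              ... the anon_vma cannot be freed while we hold it ...
 *              put_anon_vma(anon_vma);
 *      }
 */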

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}
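
/*
 * Illustrative usage sketch (editorial addition): locking always goes
 * through the root of the anon_vma tree, so an update to the interval
 * tree is bracketed by the write lock:
 *
 *      anon_vma_lock_write(anon_vma);
 *      ... link or unlink anon_vma_chains in anon_vma->rb_root ...
 *      anon_vma_unlock_write(anon_vma);
 *
 * while rmap walkers that only traverse the tree take the read side.
 */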

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
        if (likely(vma->anon_vma))
                return 0;

        return __anon_vma_prepare(vma);
}
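
/*
 * Illustrative usage sketch (editorial addition): fault handlers call
 * anon_vma_prepare() before mapping the first anonymous page into a
 * vma. The common case, where vma->anon_vma is already set, stays
 * inline; only the first fault pays for __anon_vma_prepare():
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              return VM_FAULT_OOM;
 *      page_add_new_anon_rmap(page, vma, address, false);
 */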

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
        atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                        struct mem_cgroup *memcg, unsigned long *vm_flags);

bool try_to_unmap(struct page *, enum ttu_flags flags);
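
/*
 * Illustrative usage sketch (editorial addition): callers OR ttu_flags
 * together to describe the unmap context. Page reclaim, for instance,
 * batches TLB flushes along these lines (simplified from mm/vmscan.c):
 *
 *      enum ttu_flags flags = TTU_BATCH_FLUSH;
 *
 *      if (!try_to_unmap(page, flags))
 *              goto activate_locked;
 */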

/* Avoid racy checks */
#define PVMW_SYNC               (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION          (1 << 1)

struct page_vma_mapped_walk {
        struct page *page;
        struct vm_area_struct *vma;
        unsigned long address;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        unsigned int flags;
};

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
        if (pvmw->pte)
                pte_unmap(pvmw->pte);
        if (pvmw->ptl)
                spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
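
/*
 * Illustrative usage sketch (editorial addition): a caller finds every
 * place a page is mapped within one vma by looping until the walk
 * returns false; each iteration maps and locks one PTE (or PMD):
 *
 *      struct page_vma_mapped_walk pvmw = {
 *              .page = page,
 *              .vma = vma,
 *              .address = address,
 *      };
 *
 *      while (page_vma_mapped_walk(&pvmw)) {
 *              ... inspect or update the entry at pvmw.pte (or
 *              pvmw.pmd) while holding pvmw.ptl ...
 *      }
 *
 * A caller that breaks out of the loop early must call
 * page_vma_mapped_walk_done() itself to drop the PTE map and lock.
 */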

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
void try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: To control rmap traversal for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking the traversal termination condition
 * anon_lock: for taking the anon_vma lock in an optimized way rather
 *            than the default page_lock_anon_vma_read()
 * invalid_vma: for skipping uninteresting vmas
 */
struct rmap_walk_control {
        void *arg;
        /*
         * Return false if page table scanning in rmap_walk should be stopped.
         * Otherwise, return true.
         */
        bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                                        unsigned long addr, void *arg);
        int (*done)(struct page *page);
        struct anon_vma *(*anon_lock)(struct page *page);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
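
/*
 * Illustrative usage sketch (editorial addition; the callback and
 * state names are hypothetical): a walker fills in a control structure
 * and hands it to rmap_walk(), which invokes rmap_one() for every
 * mapping of the page:
 *
 *      static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *                              unsigned long addr, void *arg)
 *      {
 *              ... process one mapping; return false to stop the walk ...
 *              return true;
 *      }
 *
 *      struct rmap_walk_control rwc = {
 *              .rmap_one = my_rmap_one,
 *              .arg = &my_state,
 *      };
 *
 *      rmap_walk(page, &rwc);
 */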

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) false

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

#endif  /* _LINUX_RMAP_H */