mm/mremap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}
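/*
 * Allocate (if necessary) the page-table levels down to the PMD covering
 * @addr in the destination and return it, or NULL if an allocation fails.
 * The PTE page itself is allocated later, in move_page_tables().
 */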
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
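/*
 * Helpers to take and drop the file and anon_vma rmap locks around a
 * page-table move; see the comment in move_ptes() for when this is needed.
 */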
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
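/*
 * Move the PTEs covering [old_addr, old_end) under @old_pmd over to
 * @new_pmd at @new_addr, taking the rmap locks when asked to and flushing
 * the TLB for any present PTEs before the page-table locks are dropped.
 */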
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}
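/*
 * Try to move a whole PMD's worth of mappings by moving the PMD entry
 * itself instead of the individual PTEs.  Returns false (and does nothing)
 * if the destination PMD is unexpectedly populated.
 */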
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#endif
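/*
 * Move the page tables for [old_addr, old_addr + len) in @vma over to
 * @new_addr in @new_vma, one PMD extent at a time, using huge-PMD or
 * whole-PMD moves where possible and falling back to move_ptes().
 * Returns the number of bytes actually moved.
 */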
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					take_rmap_locks(vma);
				moved = move_huge_pmd(vma, old_addr, new_addr,
						      old_pmd, new_pmd);
				if (need_rmap_locks)
					drop_rmap_locks(vma);
				if (moved)
					continue;
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (extent == PMD_SIZE) {
#ifdef CONFIG_HAVE_MOVE_PMD
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			bool moved;

			if (need_rmap_locks)
				take_rmap_locks(vma);
			moved = move_normal_pmd(vma, old_addr, new_addr,
						old_pmd, new_pmd);
			if (need_rmap_locks)
				drop_rmap_locks(vma);
			if (moved)
				continue;
#endif
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		if (vm_flags & VM_ACCOUNT) {
			/* Always put back VM_ACCOUNT since we won't unmap */
			vma->vm_flags |= VM_ACCOUNT;
			vm_acct_memory(new_len >> PAGE_SHIFT);
		}

		/*
		 * VMAs can actually be merged back together in copy_vma
		 * calling merge_vma. This can happen with anonymous vmas
		 * which have not yet been faulted, so if we were to consider
		 * this VMA split we'll end up adding VM_ACCOUNT on the
		 * next VMA, which is completely unrelated if this VMA
		 * was re-merged.
		 */
		if (split && new_vma == vma)
			split = 0;

		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		goto out;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}
out:
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}
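/*
 * Look up and validate the vma at @addr for an old_len -> new_len
 * mremap(): check that the request stays inside the vma and honours the
 * locked-memory and address-space limits, and charge the extra pages
 * against memory accounting (returned via @p).  Returns the vma or an
 * ERR_PTR().
 */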
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
			vma->vm_flags & VM_SHARED))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
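/*
 * Handle the MREMAP_FIXED / MREMAP_DONTUNMAP path: move the mapping to
 * new_addr (unmapping whatever is there first when MREMAP_FIXED is set),
 * or to a freshly chosen address when only MREMAP_DONTUNMAP is given.
 */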
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() need us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmaped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such scenario we can pre-compute if the whole
	 * operation has high chances to success map-wise.
	 * Worst-scenario case is when both vma's (new_addr and old_addr) get
	 * split in 3 before unmaping it.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether current map count plus 2 still leads us to 4 maps below
	 * the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len,
		vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT),
		map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
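/*
 * Can the vma be grown in place by @delta bytes without overflowing,
 * running into the next vma, or violating the architecture's constraints
 * on the mapping?
 */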
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
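For orientation, a minimal userspace sketch of the interface this file implements follows. It only exercises the plain MREMAP_MAYMOVE path (the vma_to_resize()/move_vma() route above); the page-sized lengths and the anonymous mapping are assumptions of the example, not requirements of the syscall.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 2 * 4096;

	/* Anonymous mapping that we will grow with mremap(). */
	void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xaa, old_len);

	/*
	 * Grow the mapping; MREMAP_MAYMOVE lets the kernel relocate it
	 * (the move_vma()/move_page_tables() path above) if it cannot
	 * be expanded in place.
	 */
	void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("old %p new %p\n", p, q);

	munmap(q, new_len);
	return 0;
}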