// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}
static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
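
/*
 * rmap lock helpers used while moving page tables; see the locking
 * discussion at the top of move_ptes().
 */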
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}
#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif
enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}
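
/*
 * Move the page table entries covering [old_addr, old_addr + len) in @vma to
 * @new_addr in @new_vma, moving whole PUD/PMD entries where possible and
 * falling back to copying individual ptes.  Returns the number of bytes
 * actually moved, which may be less than @len if allocating new page tables
 * fails.
 */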
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
			return -ENOMEM;
	}

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
			vm_unacct_memory(new_len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/*
		 * anon_vma links of the old vma are no longer needed after its
		 * page table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}
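
/*
 * Look up and validate the vma at @addr for a resize from @old_len to
 * @new_len, enforcing the mlock and commit limits; on success, *p holds the
 * number of extra pages charged for VM_ACCOUNT mappings.
 */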
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pgoff;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n",
			     current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;

		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;

		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
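
/*
 * Handle mremap() requests that carry MREMAP_FIXED and/or MREMAP_DONTUNMAP:
 * validate the destination range, make room for it when MREMAP_FIXED is set,
 * and move the vma there.
 */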
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such a scenario we pre-compute whether the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst case is when both vma's (new_addr and old_addr) get
	 * split in 3 before unmapping.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safer.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
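
/*
 * Return 1 if the vma can be grown in place by @delta bytes: the expanded
 * range must not overflow, must not run into the following vma, and must be
 * acceptable to get_unmapped_area() as a MAP_FIXED request.
 */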
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;

		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}