mm/mremap.c

/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
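
/*
 * Move the page table entries for one pmd-sized chunk from the old
 * mapping to the new one.  For a file-backed vma the i_mmap_mutex is
 * taken, and both the source and destination pte locks are held while
 * the entries are copied.
 */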
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
}

#define LATENCY_LIMIT	(64 * PAGE_SIZE)
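
/*
 * Walk the range one pmd at a time, moving the ptes (or an entire huge
 * pmd) from the old address to the new one.  Returns the number of
 * bytes actually moved; move_vma() copies back and unmaps the new area
 * if the move was only partial.
 */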
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE)
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma->vm_mm, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * Before moving the page tables from the new vma to
		 * the old vma, we need to be sure the old vma is
		 * queued after new vma in the same_anon_vma list to
		 * prevent SMP races with rmap_walk (that could lead
		 * rmap_walk to miss some page table).
		 */
		anon_vma_moveto_tail(vma);

		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}
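
/*
 * Look up the vma covering addr and check that the requested resize is
 * allowed: the old range must lie within a single vma, and a growing
 * mapping must pass the pgoff-overflow, mlock, address-space and
 * accounting checks.  Returns the vma, or an ERR_PTR() on failure.
 */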
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}
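
/*
 * MREMAP_FIXED case: move the mapping to a caller-chosen address,
 * unmapping whatever was previously there.
 */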
static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
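
/*
 * Can this vma be grown in place by delta bytes, without overflowing
 * or running into the following mapping?
 */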
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}
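
/*
 * The mremap(2) entry point: do_mremap() does the work with mmap_sem
 * held for writing.
 */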
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
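
For context, a minimal userspace sketch of the interface this file implements
(illustrative only, not part of mremap.c): it creates an anonymous mapping with
mmap(2), then grows it with mremap(2), letting the kernel relocate the mapping
if the adjacent address space is unavailable.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 2 * 4096;

	/* Map one anonymous page and put something in it. */
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");

	/* Grow the mapping; MREMAP_MAYMOVE lets the kernel move it. */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	printf("%s (moved: %s)\n", q, q == p ? "no" : "yes");
	munmap(q, new_len);
	return 0;
}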