/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}
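
/*
 * Allocate (if necessary) the pud, pmd and pte levels covering @addr
 * in the destination, returning the pmd to copy into, or NULL if any
 * allocation fails.  The new pmd must not be a huge pmd: the pte page
 * is allocated up front so move_ptes() can map and fill it directly.
 */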
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));
	if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
		return NULL;

	return pmd;
}
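
/*
 * Move the ptes of one span (at most a pmd's worth) from old_pmd to
 * new_pmd: each present pte is cleared and flushed in the source and
 * written into the destination under both pte locks, with the i_mmap
 * lock held for file mappings and mmu notifiers told about the range.
 */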
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	unsigned long old_start;

	old_start = old_addr;
	mmu_notifier_invalidate_range_start(vma->vm_mm,
					    old_start, old_end);
	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		new_vma->vm_truncate_count = 0;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}
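
/*
 * Walk the old range a pmd at a time (each batch capped at
 * LATENCY_LIMIT so that cond_resched() gets a look-in), moving ptes
 * across with move_ptes().  Returns how many bytes were actually
 * moved, which is less than len only if allocating a destination pmd
 * failed part way through.
 */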
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}
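
/*
 * Do the actual move: duplicate the vma at new_addr, shift the page
 * tables across (moving them home again on partial failure), then
 * unmap the old range, taking care that the VM_ACCOUNT charge on the
 * old reservation is neither lost nor double-counted.
 */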
static unsigned long move_vma(struct vm_area_struct *vma,
	unsigned long old_addr, unsigned long old_len,
	unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr,
				     old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}
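
/*
 * Look up and sanity-check the vma being resized: it must exist at
 * @addr, not be hugetlb, and cover the whole old range; the growth
 * must pass the mlock rlimit, total_vm and commit-accounting limits.
 * On success *p holds the pages newly charged against VM_ACCOUNT.
 */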
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
		if (new_len > old_len)
			goto Efault;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}
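
/*
 * The MREMAP_FIXED path: validate that the fixed destination is sane
 * and does not overlap the source, unmap whatever currently lives
 * there, then resize and move the vma into place with move_vma().
 */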
static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
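
/*
 * Can this vma be grown in place by @delta bytes?  Only if the larger
 * end does not wrap, does not run into the next vma, and the
 * architecture's get_unmapped_area() has no objection to the enlarged
 * fixed mapping.
 */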
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}
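
/*
 * The mremap(2) entry point just takes mmap_sem for writing and defers
 * to do_mremap(); every path above relies on that exclusive hold,
 * notably move_ptes()'s src/dst pte lock ordering.
 */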
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
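
/*
 * Illustrative userspace usage (not part of this file): grow an
 * anonymous mapping, letting the kernel move it if it cannot be
 * expanded in place:
 *
 *	void *old = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
 *			 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	void *new = mremap(old, 4096, 8192, MREMAP_MAYMOVE);
 *	if (new == MAP_FAILED)
 *		perror("mremap");
 */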