mm/mremap.c
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

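/*
 * Allocate any missing pgd/pud/pmd levels for addr in the destination
 * area; the pte level itself is allocated later by the caller.
 */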
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
	else if (pte_file(pte))
		pte = pte_file_mksoft_dirty(pte);
#endif
	return pte;
}

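/*
 * Move the ptes in [old_addr, old_end) under old_pmd over to new_pmd,
 * clearing each source entry and re-inserting it (with the soft-dirty
 * bit propagated) at the corresponding new address.
 */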
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			mutex_lock(&mapping->i_mmap_mutex);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
}

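/*
 * Move page table entries for the whole range in chunks that never
 * cross a pmd boundary or exceed LATENCY_LIMIT, so cond_resched() can
 * run between chunks.  Returns the number of bytes actually moved.
 */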
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE)
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

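/*
 * Move a mapping to a new location: set up new_vma, move the page
 * tables across, then unmap the old range while keeping VM_ACCOUNT
 * and locked_vm accounting consistent.
 */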
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

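/*
 * Look up and validate the vma at addr for a resize: reject hugetlb
 * and cross-vma requests, enforce mlock and address-space limits, and
 * return the number of newly charged pages via *p for a VM_ACCOUNT
 * mapping.
 */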
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

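/*
 * Handle the MREMAP_FIXED case: validate new_addr, unmap whatever is
 * already mapped there, trim the old range if it is shrinking, then
 * move the vma to its new location.
 */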
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

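/*
 * Can this vma grow in place by delta bytes?  Fails on address
 * overflow, on running into the next vma, or if the architecture
 * rejects the enlarged range via get_unmapped_area().
 */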
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (addr & ~PAGE_MASK)
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}