mm/mremap.c

/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"
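
/*
 * Walk the existing page tables to find the pmd covering @addr.
 * Returns NULL if the pgd, pud or pmd level is not present, in which
 * case there are no ptes to move in that range.
 */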
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}
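
/*
 * Allocate page-table levels as needed so that a pmd (with a pte page)
 * exists for @addr on the destination side.  Returns the pmd, or NULL
 * if allocation fails.
 */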
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
		return NULL;

	return pmd;
}
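
/*
 * Move the ptes in [old_addr, old_end) from old_pmd over to new_pmd,
 * clearing and flushing each source pte and installing it at the
 * corresponding offset in the destination.  Both pte locks are held
 * (nested when they differ), i_mmap_lock is taken for file-backed vmas
 * to keep truncation out, and mmu notifiers are told about the range
 * being unmapped from the old address.
 */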
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	unsigned long old_start;

	old_start = old_addr;
	mmu_notifier_invalidate_range_start(vma->vm_mm,
					    old_start, old_end);
	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		if (new_vma->vm_truncate_count &&
		    new_vma->vm_truncate_count != vma->vm_truncate_count)
			new_vma->vm_truncate_count = 0;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map_nested(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap_nested(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}

#define LATENCY_LIMIT	(64 * PAGE_SIZE)
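
/*
 * Copy page-table entries for [old_addr, old_addr + len) in @vma over
 * to @new_addr in @new_vma, one pmd's worth (capped at LATENCY_LIMIT)
 * at a time, with a cond_resched() between chunks.  Returns how many
 * bytes were actually moved, which may be less than @len if a
 * destination pmd could not be allocated.
 */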
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}
  
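/*
 * Move the vma at @old_addr to @new_addr: create new_vma with
 * copy_vma(), move the page tables across (moving them back again if
 * that fails part way), sort out VM_ACCOUNT and the vm counters, then
 * unmap the old range.  Returns the new address on success, or a
 * negative error code.
 */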
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}
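
/*
 * Look up and sanity-check the vma at @addr for a resize from @old_len
 * to @new_len: it must exist, not be hugetlb, cover the whole old
 * range, and respect VM_DONTEXPAND/VM_PFNMAP, the RLIMIT_MEMLOCK limit
 * and may_expand_vm().  If the vma is VM_ACCOUNTed, the extra pages
 * are charged and returned in *p.  Returns the vma or an ERR_PTR().
 */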
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
		if (new_len > old_len)
			goto Efault;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}
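
/*
 * MREMAP_FIXED path: validate the requested new range (page aligned,
 * within TASK_SIZE, not overlapping the old range), unmap whatever is
 * there, shrink the old mapping first if needed, then place and move
 * the vma to @new_addr with move_vma().
 */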
static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
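
/*
 * Can this vma be grown in place by @delta bytes?  Only if the new end
 * does not wrap, does not run into the next vma, and the architecture's
 * get_unmapped_area() accepts the enlarged fixed mapping.
 */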
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;

		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}
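
/*
 * mremap(2) entry point: takes mmap_sem for writing and hands the real
 * work to do_mremap() above.
 */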
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
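
/*
 * Illustrative userspace usage (not part of this file): growing an
 * anonymous mapping with mremap(2), allowing the kernel to move it
 * (compile with _GNU_SOURCE and <sys/mman.h>):
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *
 * If the area cannot be expanded in place, q differs from p and the
 * old range is unmapped, following the paths in do_mremap() above.
 */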