  /*
   *  mm/mprotect.c
   *
   *  (C) Copyright 1994 Linus Torvalds
   *  (C) Copyright 2002 Christoph Hellwig
   *
   *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
   *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
   */
  
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/shm.h>
  #include <linux/mman.h>
  #include <linux/fs.h>
  #include <linux/highmem.h>
  #include <linux/security.h>
  #include <linux/mempolicy.h>
  #include <linux/personality.h>
  #include <linux/syscalls.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/mmu_notifier.h>
  #include <linux/migrate.h>
  #include <linux/perf_event.h>
  #include <linux/ksm.h>
  #include <asm/uaccess.h>
  #include <asm/pgtable.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
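
/*
 * Fallback for architectures that do not define pgprot_modify():
 * simply adopt the new protection bits wholesale.
 */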
  #ifndef pgprot_modify
  static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
  {
  	return newprot;
  }
  #endif
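
/*
 * Walk the PTEs mapped by one pmd and rewrite their protection bits.
 * Present PTEs get the new protection (or are marked NUMA when
 * prot_numa is set); writable migration entries are downgraded to
 * read-only.  Returns the number of entries actually changed.
 */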
  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	pte_t *pte, oldpte;
  	spinlock_t *ptl;
  	unsigned long pages = 0;

  	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
  	arch_enter_lazy_mmu_mode();
  	do {
  		oldpte = *pte;
  		if (pte_present(oldpte)) {
  			pte_t ptent;
  			bool updated = false;

  			if (!prot_numa) {
  				ptent = ptep_modify_prot_start(mm, addr, pte);
  				if (pte_numa(ptent))
  					ptent = pte_mknonnuma(ptent);
  				ptent = pte_modify(ptent, newprot);
  				/*
  				 * Avoid taking write faults for pages we
  				 * know to be dirty.
  				 */
  				if (dirty_accountable && pte_dirty(ptent))
  					ptent = pte_mkwrite(ptent);
  				ptep_modify_prot_commit(mm, addr, pte, ptent);
  				updated = true;
  			} else {
  				struct page *page;
  
  				page = vm_normal_page(vma, addr, oldpte);
  				if (page && !PageKsm(page)) {
  					if (!pte_numa(oldpte)) {
  						ptep_set_numa(mm, addr, pte);
  						updated = true;
  					}
  				}
  			}
  			if (updated)
  				pages++;
  		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
  			swp_entry_t entry = pte_to_swp_entry(oldpte);
  
  			if (is_write_migration_entry(entry)) {
  				pte_t newpte;
  				/*
				 * A protection check is difficult, so
				 * just be safe and disable write.
  				 */
  				make_migration_entry_read(&entry);
  				newpte = swp_entry_to_pte(entry);
  				if (pte_swp_soft_dirty(oldpte))
  					newpte = pte_swp_mksoft_dirty(newpte);
  				set_pte_at(mm, addr, pte, newpte);
  
  				pages++;
  			}
  		}
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	arch_leave_lazy_mmu_mode();
  	pte_unmap_unlock(pte - 1, ptl);
  
  	return pages;
  }
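
/*
 * Walk the pmds under one pud.  A transparent huge pmd is either
 * split (when the range covers only part of it) or updated in one
 * shot via change_huge_pmd(); everything else goes through
 * change_pte_range().  Returns the number of pages updated.
 */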
  static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
  		pud_t *pud, unsigned long addr, unsigned long end,
  		pgprot_t newprot, int dirty_accountable, int prot_numa)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	unsigned long pages = 0;
  	unsigned long nr_huge_updates = 0;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		unsigned long this_pages;
  		next = pmd_addr_end(addr, end);
  		if (pmd_trans_huge(*pmd)) {
  			if (next - addr != HPAGE_PMD_SIZE)
  				split_huge_page_pmd(vma, addr, pmd);
  			else {
  				int nr_ptes = change_huge_pmd(vma, pmd, addr,
  						newprot, prot_numa);
  
  				if (nr_ptes) {
  					if (nr_ptes == HPAGE_PMD_NR) {
  						pages += HPAGE_PMD_NR;
  						nr_huge_updates++;
  					}
  					continue;
  				}
  			}
  			/* fall through */
  		}
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
  		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  		pages += this_pages;
  	} while (pmd++, addr = next, addr != end);

  	if (nr_huge_updates)
  		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
  	return pages;
  }
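
/*
 * Walk the puds under one pgd, delegating populated ranges to
 * change_pmd_range() and accumulating the count of updated pages.
 */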
  static inline unsigned long change_pud_range(struct vm_area_struct *vma,
  		pgd_t *pgd, unsigned long addr, unsigned long end,
  		pgprot_t newprot, int dirty_accountable, int prot_numa)
  {
  	pud_t *pud;
  	unsigned long next;
  	unsigned long pages = 0;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
  		pages += change_pmd_range(vma, pud, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  	} while (pud++, addr = next, addr != end);
  
  	return pages;
  }
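
/*
 * Top-level page-table walk for non-hugetlb VMAs.  tlb_flush_pending
 * is set around the walk so racing faults see the pending flush, and
 * the TLB itself is flushed only if some entry was actually modified.
 */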
  static unsigned long change_protection_range(struct vm_area_struct *vma,
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	pgd_t *pgd;
  	unsigned long next;
  	unsigned long start = addr;
  	unsigned long pages = 0;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset(mm, addr);
  	flush_cache_range(vma, addr, end);
  	set_tlb_flush_pending(mm);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
  		pages += change_pud_range(vma, pgd, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  	} while (pgd++, addr = next, addr != end);

  	/* Only flush the TLB if we actually modified any entries: */
  	if (pages)
  		flush_tlb_range(vma, start, end);
  	clear_tlb_flush_pending(mm);
  
  	return pages;
  }
  
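/*
 * Change protection over [start, end): hugetlb VMAs take their own
 * path, everything else goes through change_protection_range().
 * The walk is bracketed by mmu-notifier invalidation callbacks.
 */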
  unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
  		       unsigned long end, pgprot_t newprot,
  		       int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long pages;
  
  	mmu_notifier_invalidate_range_start(mm, start, end);
  	if (is_vm_hugetlb_page(vma))
  		pages = hugetlb_change_protection(vma, start, end, newprot);
  	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);
  	mmu_notifier_invalidate_range_end(mm, start, end);
  
  	return pages;
  }
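
/*
 * Apply newflags to [start, end) within one VMA: merge with adjacent
 * VMAs when possible, split at the boundaries otherwise, charge the
 * commit for newly writable private mappings, then rewrite the page
 * tables and fix up the accounting statistics.
 */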
  int
  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
  	unsigned long start, unsigned long end, unsigned long newflags)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long oldflags = vma->vm_flags;
  	long nrpages = (end - start) >> PAGE_SHIFT;
  	unsigned long charged = 0;
  	pgoff_t pgoff;
  	int error;
  	int dirty_accountable = 0;
  
  	if (newflags == oldflags) {
  		*pprev = vma;
  		return 0;
  	}
  
  	/*
  	 * If we make a private mapping writable we increase our commit;
  	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
  	 */
  	if (newflags & VM_WRITE) {
  		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
  						VM_SHARED|VM_NORESERVE))) {
  			charged = nrpages;
  			if (security_vm_enough_memory_mm(mm, charged))
  				return -ENOMEM;
  			newflags |= VM_ACCOUNT;
  		}
  	}
  	/*
  	 * First try to merge with previous and/or next vma.
  	 */
  	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
  	*pprev = vma_merge(mm, *pprev, start, end, newflags,
  			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
  	if (*pprev) {
  		vma = *pprev;
  		goto success;
  	}
  
  	*pprev = vma;
  
  	if (start != vma->vm_start) {
  		error = split_vma(mm, vma, start, 1);
  		if (error)
  			goto fail;
  	}
  
  	if (end != vma->vm_end) {
  		error = split_vma(mm, vma, end, 0);
  		if (error)
  			goto fail;
  	}
  
  success:
  	/*
  	 * vm_flags and vm_page_prot are protected by the mmap_sem
  	 * held in write mode.
  	 */
  	vma->vm_flags = newflags;
  	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
  					  vm_get_page_prot(newflags));
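
	/*
	 * Shared mappings that want write notification must take a
	 * fault on first write, so leave the write bit clear in
	 * vm_page_prot; already-dirty ptes may still be mapped
	 * writable by change_pte_range() via dirty_accountable.
	 */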
  	if (vma_wants_writenotify(vma)) {
  		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
  		dirty_accountable = 1;
  	}

  	change_protection(vma, start, end, vma->vm_page_prot,
  			  dirty_accountable, 0);

  	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
  	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
  	perf_event_mmap(vma);
  	return 0;
  
  fail:
  	vm_unacct_memory(charged);
  	return error;
  }
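
/*
 * The mprotect(2) entry point: validate the arguments, then walk the
 * VMAs covering [start, start+len) under a write-locked mmap_sem and
 * apply mprotect_fixup() to each piece in turn.
 */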
  SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
  		unsigned long, prot)
  {
  	unsigned long vm_flags, nstart, end, tmp, reqprot;
  	struct vm_area_struct *vma, *prev;
  	int error = -EINVAL;
  	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
  	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
  	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
  		return -EINVAL;
  
  	if (start & ~PAGE_MASK)
  		return -EINVAL;
  	if (!len)
  		return 0;
  	len = PAGE_ALIGN(len);
  	end = start + len;
  	if (end <= start)
  		return -ENOMEM;
  	if (!arch_validate_prot(prot))
  		return -EINVAL;
  
  	reqprot = prot;
  	/*
  	 * Does the application expect PROT_READ to imply PROT_EXEC:
  	 */
  	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
  		prot |= PROT_EXEC;
  
  	vm_flags = calc_vm_prot_bits(prot);
  
  	down_write(&current->mm->mmap_sem);
  	vma = find_vma(current->mm, start);
  	error = -ENOMEM;
  	if (!vma)
  		goto out;
  	prev = vma->vm_prev;
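
	/*
	 * PROT_GROWSDOWN extends the affected range down to the start
	 * of a grows-down stack VMA; PROT_GROWSUP extends it up to the
	 * end of a grows-up one.
	 */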
  	if (unlikely(grows & PROT_GROWSDOWN)) {
  		if (vma->vm_start >= end)
  			goto out;
  		start = vma->vm_start;
  		error = -EINVAL;
  		if (!(vma->vm_flags & VM_GROWSDOWN))
  			goto out;
  	} else {
  		if (vma->vm_start > start)
  			goto out;
  		if (unlikely(grows & PROT_GROWSUP)) {
  			end = vma->vm_end;
  			error = -EINVAL;
  			if (!(vma->vm_flags & VM_GROWSUP))
  				goto out;
  		}
  	}
  	if (start > vma->vm_start)
  		prev = vma;
  
  	for (nstart = start ; ; ) {
  		unsigned long newflags;
  		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

  		newflags = vm_flags;
  		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
  		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
  			error = -EACCES;
  			goto out;
  		}
  
  		error = security_file_mprotect(vma, reqprot, prot);
  		if (error)
  			goto out;
  
  		tmp = vma->vm_end;
  		if (tmp > end)
  			tmp = end;
  		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
  		if (error)
  			goto out;
  		nstart = tmp;
  
  		if (nstart < prev->vm_end)
  			nstart = prev->vm_end;
  		if (nstart >= end)
  			goto out;
  
  		vma = prev->vm_next;
  		if (!vma || vma->vm_start != nstart) {
  			error = -ENOMEM;
  			goto out;
  		}
  	}
  out:
  	up_write(&current->mm->mmap_sem);
  	return error;
  }