  /*
   *  mm/mprotect.c
   *
   *  (C) Copyright 1994 Linus Torvalds
   *  (C) Copyright 2002 Christoph Hellwig
   *
   *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
   *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
   */
  
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/shm.h>
  #include <linux/mman.h>
  #include <linux/fs.h>
  #include <linux/highmem.h>
  #include <linux/security.h>
  #include <linux/mempolicy.h>
  #include <linux/personality.h>
  #include <linux/syscalls.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/mmu_notifier.h>
  #include <linux/migrate.h>
  #include <linux/perf_event.h>
  #include <linux/ksm.h>
  #include <asm/uaccess.h>
  #include <asm/pgtable.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>

  /*
   * For a prot_numa update we only hold mmap_sem for read, so there is a
   * potential race with faulting where a pmd was temporarily none. This
   * function checks for a transhuge pmd under the appropriate lock. It
   * returns a pte if it was successfully locked or NULL if it raced with
   * a transhuge insertion.
   */
  static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
  			unsigned long addr, int prot_numa, spinlock_t **ptl)
  {
  	pte_t *pte;
  	spinlock_t *pmdl;
  
  	/* !prot_numa is protected by mmap_sem held for write */
  	if (!prot_numa)
  		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
  
  	pmdl = pmd_lock(vma->vm_mm, pmd);
  	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
  		spin_unlock(pmdl);
  		return NULL;
  	}
  
  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
  	spin_unlock(pmdl);
  	return pte;
  }
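
  /*
   * Walk the PTEs mapping [addr, end) under one pmd, applying newprot to
   * present entries and downgrading write migration entries. Returns the
   * number of entries that were actually updated.
   */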
  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	pte_t *pte, oldpte;
  	spinlock_t *ptl;
  	unsigned long pages = 0;

  	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
  	if (!pte)
  		return 0;
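
  	/* let the architecture batch the PTE updates made in the loop below */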
  	arch_enter_lazy_mmu_mode();
  	do {
  		oldpte = *pte;
  		if (pte_present(oldpte)) {
  			pte_t ptent;
  			bool preserve_write = prot_numa && pte_write(oldpte);

  			/*
  			 * Avoid trapping faults against the zero or KSM
  			 * pages. See similar comment in change_huge_pmd.
  			 */
  			if (prot_numa) {
  				struct page *page;
  
  				page = vm_normal_page(vma, addr, oldpte);
  				if (!page || PageKsm(page))
  					continue;
  
  				/* Avoid TLB flush if possible */
  				if (pte_protnone(oldpte))
  					continue;
  			}
  			ptent = ptep_modify_prot_start(mm, addr, pte);
  			ptent = pte_modify(ptent, newprot);
  			if (preserve_write)
  				ptent = pte_mkwrite(ptent);

  			/* Avoid taking write faults for known dirty pages */
  			if (dirty_accountable && pte_dirty(ptent) &&
  					(pte_soft_dirty(ptent) ||
  					 !(vma->vm_flags & VM_SOFTDIRTY))) {
  				ptent = pte_mkwrite(ptent);
  			}
  			ptep_modify_prot_commit(mm, addr, pte, ptent);
  			pages++;
  		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
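  			/* oldpte is not present: it may be a swap or migration entry */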
  			swp_entry_t entry = pte_to_swp_entry(oldpte);
  
  			if (is_write_migration_entry(entry)) {
  				pte_t newpte;
  				/*
  				 * A protection check is difficult, so
  				 * just be safe and disable write.
  				 */
  				make_migration_entry_read(&entry);
  				newpte = swp_entry_to_pte(entry);
  				if (pte_swp_soft_dirty(oldpte))
  					newpte = pte_swp_mksoft_dirty(newpte);
  				set_pte_at(mm, addr, pte, newpte);
  
  				pages++;
  			}
  		}
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	arch_leave_lazy_mmu_mode();
  	pte_unmap_unlock(pte - 1, ptl);
  
  	return pages;
  }
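
  /*
   * Walk the pmds under one pud: transparent huge pmds are split or updated
   * in place, everything else is handed to change_pte_range(). Returns the
   * number of pages whose protection changed.
   */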
  static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
  		pud_t *pud, unsigned long addr, unsigned long end,
  		pgprot_t newprot, int dirty_accountable, int prot_numa)
  {
  	pmd_t *pmd;
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long next;
  	unsigned long pages = 0;
  	unsigned long nr_huge_updates = 0;
  	unsigned long mni_start = 0;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		unsigned long this_pages;
  		next = pmd_addr_end(addr, end);
  		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
  			continue;
  
  		/* invoke the mmu notifier if the pmd is populated */
  		if (!mni_start) {
  			mni_start = addr;
  			mmu_notifier_invalidate_range_start(mm, mni_start, end);
  		}
  		if (pmd_trans_huge(*pmd)) {
  			if (next - addr != HPAGE_PMD_SIZE)
  				split_huge_page_pmd(vma, addr, pmd);
  			else {
  				int nr_ptes = change_huge_pmd(vma, pmd, addr,
  						newprot, prot_numa);
  
  				if (nr_ptes) {
  					if (nr_ptes == HPAGE_PMD_NR) {
  						pages += HPAGE_PMD_NR;
  						nr_huge_updates++;
  					}
  
  					/* huge pmd was handled */
  					continue;
  				}
  			}
  			/* fall through, the trans huge pmd was just split */
  		}
  		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  		pages += this_pages;
  	} while (pmd++, addr = next, addr != end);

  	if (mni_start)
  		mmu_notifier_invalidate_range_end(mm, mni_start, end);
  	if (nr_huge_updates)
  		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
  	return pages;
  }
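
  /*
   * Walk the puds under one pgd, delegating each populated entry to
   * change_pmd_range().
   */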
  static inline unsigned long change_pud_range(struct vm_area_struct *vma,
  		pgd_t *pgd, unsigned long addr, unsigned long end,
  		pgprot_t newprot, int dirty_accountable, int prot_numa)
  {
  	pud_t *pud;
  	unsigned long next;
  	unsigned long pages = 0;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
  		pages += change_pmd_range(vma, pud, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  	} while (pud++, addr = next, addr != end);
  
  	return pages;
  }
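
  /*
   * Update protection across [addr, end) of a regular (non-hugetlb) vma,
   * flushing the TLB only if any entries were actually modified.
   */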
  static unsigned long change_protection_range(struct vm_area_struct *vma,
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	pgd_t *pgd;
  	unsigned long next;
  	unsigned long start = addr;
  	unsigned long pages = 0;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset(mm, addr);
  	flush_cache_range(vma, addr, end);
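  	/* note a pending TLB flush; paired with clear_tlb_flush_pending() below */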
  	set_tlb_flush_pending(mm);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
  		pages += change_pud_range(vma, pgd, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  	} while (pgd++, addr = next, addr != end);

  	/* Only flush the TLB if we actually modified any entries: */
  	if (pages)
  		flush_tlb_range(vma, start, end);
  	clear_tlb_flush_pending(mm);
  
  	return pages;
  }
  
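  /*
   * Top-level entry: dispatch to the hugetlb code or to
   * change_protection_range() and return the number of pages updated.
   */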
  unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
  		       unsigned long end, pgprot_t newprot,
  		       int dirty_accountable, int prot_numa)
  {
  	unsigned long pages;

  	if (is_vm_hugetlb_page(vma))
  		pages = hugetlb_change_protection(vma, start, end, newprot);
  	else
  		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
  
  	return pages;
  }
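
  /*
   * Apply newflags to [start, end), charging for any newly writable private
   * pages and merging or splitting the vma as needed so the range is covered
   * by a vma with exactly these flags.
   */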
  int
  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
  	unsigned long start, unsigned long end, unsigned long newflags)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long oldflags = vma->vm_flags;
  	long nrpages = (end - start) >> PAGE_SHIFT;
  	unsigned long charged = 0;
  	pgoff_t pgoff;
  	int error;
  	int dirty_accountable = 0;
  
  	if (newflags == oldflags) {
  		*pprev = vma;
  		return 0;
  	}
  
  	/*
  	 * If we make a private mapping writable we increase our commit;
  	 * but (without finer accounting) cannot reduce our commit if we
  	 * make it unwritable again. hugetlb mappings were accounted for
  	 * even if read-only, so there is no need to account for them here.
  	 */
  	if (newflags & VM_WRITE) {
  		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
  						VM_SHARED|VM_NORESERVE))) {
  			charged = nrpages;
  			if (security_vm_enough_memory_mm(mm, charged))
  				return -ENOMEM;
  			newflags |= VM_ACCOUNT;
  		}
  	}
  	/*
  	 * First try to merge with previous and/or next vma.
  	 */
  	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
  	*pprev = vma_merge(mm, *pprev, start, end, newflags,
  			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
  			vma_get_anon_name(vma));
  	if (*pprev) {
  		vma = *pprev;
  		goto success;
  	}
  
  	*pprev = vma;
  
  	if (start != vma->vm_start) {
  		error = split_vma(mm, vma, start, 1);
  		if (error)
  			goto fail;
  	}
  
  	if (end != vma->vm_end) {
  		error = split_vma(mm, vma, end, 0);
  		if (error)
  			goto fail;
  	}
  
  success:
  	/*
  	 * vm_flags and vm_page_prot are protected by the mmap_sem
  	 * held in write mode.
  	 */
  	vma->vm_flags = newflags;
  	dirty_accountable = vma_wants_writenotify(vma);
  	vma_set_page_prot(vma);

  	change_protection(vma, start, end, vma->vm_page_prot,
  			  dirty_accountable, 0);

  	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
  	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
  	perf_event_mmap(vma);
  	return 0;
  
  fail:
  	vm_unacct_memory(charged);
  	return error;
  }
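
  /*
   * mprotect(2): change the protection of [start, start+len), walking and
   * fixing up every vma that intersects the range.
   */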
  SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
  		unsigned long, prot)
  {
  	unsigned long vm_flags, nstart, end, tmp, reqprot;
  	struct vm_area_struct *vma, *prev;
  	int error = -EINVAL;
  	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
  	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
  	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
  		return -EINVAL;
  
  	if (start & ~PAGE_MASK)
  		return -EINVAL;
  	if (!len)
  		return 0;
  	len = PAGE_ALIGN(len);
  	end = start + len;
  	if (end <= start)
  		return -ENOMEM;
  	if (!arch_validate_prot(prot))
  		return -EINVAL;
  
  	reqprot = prot;
  	/*
  	 * Does the application expect PROT_READ to imply PROT_EXEC?
  	 */
  	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
  		prot |= PROT_EXEC;
  
  	vm_flags = calc_vm_prot_bits(prot);
  
  	down_write(&current->mm->mmap_sem);
  	vma = find_vma(current->mm, start);
  	error = -ENOMEM;
  	if (!vma)
  		goto out;
  	prev = vma->vm_prev;
  	if (unlikely(grows & PROT_GROWSDOWN)) {
  		if (vma->vm_start >= end)
  			goto out;
  		start = vma->vm_start;
  		error = -EINVAL;
  		if (!(vma->vm_flags & VM_GROWSDOWN))
  			goto out;
  	} else {
  		if (vma->vm_start > start)
  			goto out;
  		if (unlikely(grows & PROT_GROWSUP)) {
  			end = vma->vm_end;
  			error = -EINVAL;
  			if (!(vma->vm_flags & VM_GROWSUP))
  				goto out;
  		}
  	}
  	if (start > vma->vm_start)
  		prev = vma;
  
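  	/* fix up each vma covering [start, end) in turn */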
  	for (nstart = start ; ; ) {
  		unsigned long newflags;
  		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

  		newflags = vm_flags;
  		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

  		/* newflags >> 4 shifts VM_MAY% into the position of VM_% */
  		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
  			error = -EACCES;
  			goto out;
  		}
  
  		error = security_file_mprotect(vma, reqprot, prot);
  		if (error)
  			goto out;
  
  		tmp = vma->vm_end;
  		if (tmp > end)
  			tmp = end;
  		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
  		if (error)
  			goto out;
  		nstart = tmp;
  
  		if (nstart < prev->vm_end)
  			nstart = prev->vm_end;
  		if (nstart >= end)
  			goto out;
  
  		vma = prev->vm_next;
  		if (!vma || vma->vm_start != nstart) {
  			error = -ENOMEM;
  			goto out;
  		}
  	}
  out:
  	up_write(&current->mm->mmap_sem);
  	return error;
  }