  /*
   *  mm/mprotect.c
   *
   *  (C) Copyright 1994 Linus Torvalds
   *  (C) Copyright 2002 Christoph Hellwig
   *
   *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
   *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
   */
  
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/shm.h>
  #include <linux/mman.h>
  #include <linux/fs.h>
  #include <linux/highmem.h>
  #include <linux/security.h>
  #include <linux/mempolicy.h>
  #include <linux/personality.h>
  #include <linux/syscalls.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/mmu_notifier.h>
  #include <linux/migrate.h>
  #include <linux/perf_event.h>
  #include <linux/ksm.h>
  #include <asm/uaccess.h>
  #include <asm/pgtable.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
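
/*
 * Fallback for architectures that do not define pgprot_modify(): simply
 * adopt the new protection bits wholesale. Architectures that keep
 * extra state in the pgprot (for example PAT attributes on x86) supply
 * their own version that preserves those bits from oldprot.
 */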
  #ifndef pgprot_modify
  static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
  {
  	return newprot;
  }
  #endif
  /*
   * For a prot_numa update we only hold mmap_sem for read so there is a
   * potential race with faulting where a pmd was temporarily none. This
   * function checks for a transhuge pmd under the appropriate lock. It
   * returns a pte if it was successfully locked or NULL if it raced with
   * a transhuge insertion.
   */
  static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
  			unsigned long addr, int prot_numa, spinlock_t **ptl)
  {
  	pte_t *pte;
  	spinlock_t *pmdl;
  
  	/* !prot_numa is protected by mmap_sem held for write */
  	if (!prot_numa)
  		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
  
  	pmdl = pmd_lock(vma->vm_mm, pmd);
  	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
  		spin_unlock(pmdl);
  		return NULL;
  	}
  
  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
  	spin_unlock(pmdl);
  	return pte;
  }
  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	pte_t *pte, oldpte;
  	spinlock_t *ptl;
  	unsigned long pages = 0;

  	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
  	if (!pte)
  		return 0;
  	arch_enter_lazy_mmu_mode();
  	do {
  		oldpte = *pte;
  		if (pte_present(oldpte)) {
  			pte_t ptent;
  			bool updated = false;

  			if (!prot_numa) {
  				ptent = ptep_modify_prot_start(mm, addr, pte);
  				if (pte_numa(ptent))
  					ptent = pte_mknonnuma(ptent);
  				ptent = pte_modify(ptent, newprot);
  				/*
  				 * Avoid taking write faults for pages we
  				 * know to be dirty.
  				 */
  				if (dirty_accountable && pte_dirty(ptent))
  					ptent = pte_mkwrite(ptent);
  				ptep_modify_prot_commit(mm, addr, pte, ptent);
  				updated = true;
  			} else {
  				struct page *page;
  
  				page = vm_normal_page(vma, addr, oldpte);
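				/*
				 * prot_numa: mark the pte with the NUMA
				 * hinting bit so the next access faults
				 * and can feed NUMA balancing. KSM pages
				 * are skipped as they are not worth
				 * migrating.
				 */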
  				if (page && !PageKsm(page)) {
  					if (!pte_numa(oldpte)) {
  						ptep_set_numa(mm, addr, pte);
  						updated = true;
  					}
  				}
  			}
  			if (updated)
  				pages++;
  		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
  			swp_entry_t entry = pte_to_swp_entry(oldpte);
  
  			if (is_write_migration_entry(entry)) {
  				pte_t newpte;
				/*
				 * A protection check is difficult, so
				 * just be safe and disable write.
				 */
  				make_migration_entry_read(&entry);
  				newpte = swp_entry_to_pte(entry);
  				if (pte_swp_soft_dirty(oldpte))
  					newpte = pte_swp_mksoft_dirty(newpte);
  				set_pte_at(mm, addr, pte, newpte);
  
  				pages++;
  			}
  		}
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  	arch_leave_lazy_mmu_mode();
  	pte_unmap_unlock(pte - 1, ptl);
  
  	return pages;
  }
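
/*
 * The helpers below implement the usual top-down page table walk:
 * change_protection_range() iterates over pgd entries,
 * change_pud_range() over puds and change_pmd_range() over pmds
 * (special-casing transparent huge pages), with change_pte_range()
 * above finally rewriting the individual ptes. Each level returns how
 * many pages it updated, so the top level can skip the TLB flush when
 * nothing changed.
 */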
  static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
  		pud_t *pud, unsigned long addr, unsigned long end,
  		pgprot_t newprot, int dirty_accountable, int prot_numa)
  {
  	pmd_t *pmd;
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long next;
  	unsigned long pages = 0;
  	unsigned long nr_huge_updates = 0;
  	unsigned long mni_start = 0;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		unsigned long this_pages;
  		next = pmd_addr_end(addr, end);
  		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
  			continue;
  
  		/* invoke the mmu notifier if the pmd is populated */
  		if (!mni_start) {
  			mni_start = addr;
  			mmu_notifier_invalidate_range_start(mm, mni_start, end);
  		}
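
		/*
		 * A transparent huge pmd that spans exactly the whole
		 * range is updated in place by change_huge_pmd();
		 * otherwise it is split first and the resulting ptes are
		 * handled one by one below.
		 */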
  		if (pmd_trans_huge(*pmd)) {
  			if (next - addr != HPAGE_PMD_SIZE)
  				split_huge_page_pmd(vma, addr, pmd);
  			else {
  				int nr_ptes = change_huge_pmd(vma, pmd, addr,
  						newprot, prot_numa);
  
  				if (nr_ptes) {
  					if (nr_ptes == HPAGE_PMD_NR) {
  						pages += HPAGE_PMD_NR;
  						nr_huge_updates++;
  					}
  
  					/* huge pmd was handled */
  					continue;
  				}
  			}
  			/* fall through, the trans huge pmd just split */
  		}
  		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  		pages += this_pages;
  	} while (pmd++, addr = next, addr != end);

  	if (mni_start)
  		mmu_notifier_invalidate_range_end(mm, mni_start, end);
  	if (nr_huge_updates)
  		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
  	return pages;
  }
  static inline unsigned long change_pud_range(struct vm_area_struct *vma,
  		pgd_t *pgd, unsigned long addr, unsigned long end,
  		pgprot_t newprot, int dirty_accountable, int prot_numa)
  {
  	pud_t *pud;
  	unsigned long next;
  	unsigned long pages = 0;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
  		pages += change_pmd_range(vma, pud, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  	} while (pud++, addr = next, addr != end);
  
  	return pages;
  }
  static unsigned long change_protection_range(struct vm_area_struct *vma,
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	pgd_t *pgd;
  	unsigned long next;
  	unsigned long start = addr;
  	unsigned long pages = 0;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset(mm, addr);
  	flush_cache_range(vma, addr, end);
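	/*
	 * Advertise that a TLB flush may be pending for this mm so that
	 * code racing with us (e.g. the NUMA hinting fault path) can
	 * flush before relying on the page table contents.
	 */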
  	set_tlb_flush_pending(mm);
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
  		pages += change_pud_range(vma, pgd, addr, next, newprot,
  				 dirty_accountable, prot_numa);
  	} while (pgd++, addr = next, addr != end);

  	/* Only flush the TLB if we actually modified any entries: */
  	if (pages)
  		flush_tlb_range(vma, start, end);
  	clear_tlb_flush_pending(mm);
  
  	return pages;
  }
  
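/*
 * change_protection() is the common entry point: hugetlb vmas are
 * handled by hugetlb_change_protection(), everything else goes through
 * the generic walk in change_protection_range(). It returns the number
 * of pages whose protection was actually changed.
 */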
  unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
  		       unsigned long end, pgprot_t newprot,
  		       int dirty_accountable, int prot_numa)
  {
  	unsigned long pages;
  	if (is_vm_hugetlb_page(vma))
  		pages = hugetlb_change_protection(vma, start, end, newprot);
  	else
  		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
  
  	return pages;
  }
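
/*
 * mprotect_fixup() applies newflags to [start, end) within @vma: it
 * charges newly writable private pages against the commit limit, tries
 * to merge with adjacent vmas, splits the vma when only part of it is
 * covered, and finally rewrites the page protections via
 * change_protection().
 */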
  int
  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
  	unsigned long start, unsigned long end, unsigned long newflags)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long oldflags = vma->vm_flags;
  	long nrpages = (end - start) >> PAGE_SHIFT;
  	unsigned long charged = 0;
  	pgoff_t pgoff;
  	int error;
  	int dirty_accountable = 0;
  
  	if (newflags == oldflags) {
  		*pprev = vma;
  		return 0;
  	}
  
  	/*
  	 * If we make a private mapping writable we increase our commit;
  	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
  	 */
  	if (newflags & VM_WRITE) {
  		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
  						VM_SHARED|VM_NORESERVE))) {
  			charged = nrpages;
  			if (security_vm_enough_memory_mm(mm, charged))
  				return -ENOMEM;
  			newflags |= VM_ACCOUNT;
  		}
  	}
  	/*
  	 * First try to merge with previous and/or next vma.
  	 */
  	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
  	*pprev = vma_merge(mm, *pprev, start, end, newflags,
  			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
  	if (*pprev) {
  		vma = *pprev;
  		goto success;
  	}
  
  	*pprev = vma;
  
  	if (start != vma->vm_start) {
  		error = split_vma(mm, vma, start, 1);
  		if (error)
  			goto fail;
  	}
  
  	if (end != vma->vm_end) {
  		error = split_vma(mm, vma, end, 0);
  		if (error)
  			goto fail;
  	}
  
  success:
  	/*
  	 * vm_flags and vm_page_prot are protected by the mmap_sem
  	 * held in write mode.
  	 */
  	vma->vm_flags = newflags;
  	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
  					  vm_get_page_prot(newflags));
  	if (vma_wants_writenotify(vma)) {
  		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
  		dirty_accountable = 1;
  	}

  	change_protection(vma, start, end, vma->vm_page_prot,
  			  dirty_accountable, 0);

  	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
  	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
  	perf_event_mmap(vma);
  	return 0;
  
  fail:
  	vm_unacct_memory(charged);
  	return error;
  }
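
/*
 * mprotect(2): validates and page-aligns the request, then walks the
 * vma list from @start, applying mprotect_fixup() to each (partial)
 * vma until @end is reached. An unmapped gap anywhere in the range
 * makes the call fail with -ENOMEM.
 */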
  SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
  		unsigned long, prot)
  {
  	unsigned long vm_flags, nstart, end, tmp, reqprot;
  	struct vm_area_struct *vma, *prev;
  	int error = -EINVAL;
  	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
  	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
  	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
  		return -EINVAL;
  
  	if (start & ~PAGE_MASK)
  		return -EINVAL;
  	if (!len)
  		return 0;
  	len = PAGE_ALIGN(len);
  	end = start + len;
  	if (end <= start)
  		return -ENOMEM;
  	if (!arch_validate_prot(prot))
  		return -EINVAL;
  
  	reqprot = prot;
  	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
  	 */
  	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
  		prot |= PROT_EXEC;
  
  	vm_flags = calc_vm_prot_bits(prot);
  
  	down_write(&current->mm->mmap_sem);
  	vma = find_vma(current->mm, start);
  	error = -ENOMEM;
  	if (!vma)
  		goto out;
  	prev = vma->vm_prev;
  	if (unlikely(grows & PROT_GROWSDOWN)) {
  		if (vma->vm_start >= end)
  			goto out;
  		start = vma->vm_start;
  		error = -EINVAL;
  		if (!(vma->vm_flags & VM_GROWSDOWN))
  			goto out;
  	} else {
  		if (vma->vm_start > start)
  			goto out;
  		if (unlikely(grows & PROT_GROWSUP)) {
  			end = vma->vm_end;
  			error = -EINVAL;
  			if (!(vma->vm_flags & VM_GROWSUP))
  				goto out;
  		}
  	}
  	if (start > vma->vm_start)
  		prev = vma;
  
  	for (nstart = start ; ; ) {
  		unsigned long newflags;
  		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

  		newflags = vm_flags;
  		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts the VM_MAY* bits into the VM_* positions */
  		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
  			error = -EACCES;
  			goto out;
  		}
  
  		error = security_file_mprotect(vma, reqprot, prot);
  		if (error)
  			goto out;
  
  		tmp = vma->vm_end;
  		if (tmp > end)
  			tmp = end;
  		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
  		if (error)
  			goto out;
  		nstart = tmp;
  
  		if (nstart < prev->vm_end)
  			nstart = prev->vm_end;
  		if (nstart >= end)
  			goto out;
  
  		vma = prev->vm_next;
  		if (!vma || vma->vm_start != nstart) {
  			error = -ENOMEM;
  			goto out;
  		}
  	}
  out:
  	up_write(&current->mm->mmap_sem);
  	return error;
  }
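
/*
 * Userspace usage sketch (illustrative only, not part of this file):
 * the syscall above is normally reached through the libc wrapper. A
 * minimal, hypothetical example that makes an anonymous mapping
 * read-only after initialising it:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	static void make_page_readonly(void)
 *	{
 *		char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (buf == MAP_FAILED)
 *			return;
 *		memset(buf, 42, 4096);
 *		if (mprotect(buf, 4096, PROT_READ) == -1)
 *			perror("mprotect");
 *	}
 *
 * start must be page-aligned and prot a combination of PROT_* flags,
 * mirroring the checks at the top of the syscall.
 */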