  /*
   *  mm/mprotect.c
   *
   *  (C) Copyright 1994 Linus Torvalds
   *  (C) Copyright 2002 Christoph Hellwig
   *
   *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
   *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
   */
  
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/shm.h>
  #include <linux/mman.h>
  #include <linux/fs.h>
  #include <linux/highmem.h>
  #include <linux/security.h>
  #include <linux/mempolicy.h>
  #include <linux/personality.h>
  #include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
  #ifndef pgprot_modify
  static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
  {
  	return newprot;
  }
  #endif
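
/*
 * Walk the pte entries covering [addr, end) under one pmd and apply
 * newprot.  When prot_numa is set the entries are instead marked
 * pte_numa (only pages mapped once are touched) so that NUMA hinting
 * faults can be taken later.  Returns the number of entries updated and
 * reports through ret_all_same_node whether every examined page was on
 * the same NUMA node.
 */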
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa, bool *ret_all_same_node)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	bool all_same_node = true;
	int last_nid = -1;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
  			if (!prot_numa) {
  				ptent = pte_modify(ptent, newprot);
  				updated = true;
  			} else {
  				struct page *page;
  
  				page = vm_normal_page(vma, addr, oldpte);
  				if (page) {
  					int this_nid = page_to_nid(page);
  					if (last_nid == -1)
  						last_nid = this_nid;
  					if (last_nid != this_nid)
  						all_same_node = false;
  					/* only check non-shared pages */
  					if (!pte_numa(oldpte) &&
  					    page_mapcount(page) == 1) {
  						ptent = pte_mknuma(ptent);
  						updated = true;
  					}
  				}
  			}

  			/*
  			 * Avoid taking write faults for pages we know to be
  			 * dirty.
  			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
  		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
  			swp_entry_t entry = pte_to_swp_entry(oldpte);
  
  			if (is_write_migration_entry(entry)) {
  				/*
  				 * A protection check is difficult so
  				 * just be safe and disable write
  				 */
  				make_migration_entry_read(&entry);
  				set_pte_at(mm, addr, pte,
  					swp_entry_to_pte(entry));
  			}
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	*ret_all_same_node = all_same_node;
	return pages;
  }
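
/*
 * Mark a whole pmd as prot-numa while holding the page table lock.  The
 * !CONFIG_NUMA_BALANCING stub must never be called and BUGs if it is.
 */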
#ifdef CONFIG_NUMA_BALANCING
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */

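/*
 * Walk the pmds under one pud: split or convert transparent huge pmds,
 * hand everything else to change_pte_range(), and return the total
 * number of entries updated.
 */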
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	bool all_same_node;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot,
						 prot_numa)) {
				pages += HPAGE_PMD_NR;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa, &all_same_node);

  		/*
  		 * If we are changing protections for NUMA hinting faults then
  		 * set pmd_numa if the examined pages were all on the same
  		 * node. This allows a regular PMD to be handled as one fault
  		 * and effectively batches the taking of the PTL
  		 */
  		if (prot_numa && all_same_node)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	return pages;
  }
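
/*
 * Walk the puds under one pgd, handing each populated range to
 * change_pmd_range() and returning the total number of entries updated.
 */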
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
  }
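
/*
 * Rewrite every page table entry in [addr, end) of the vma with newprot.
 * The TLB is flushed only if some entries were actually changed.
 */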
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}
  
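/*
 * Top-level protection changer: hugetlb VMAs go through
 * hugetlb_change_protection(), everything else through
 * change_protection_range(), bracketed by the mmu notifier
 * invalidate calls.  Returns the number of entries updated.
 */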
  unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
  		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long pages;
  
  	mmu_notifier_invalidate_range_start(mm, start, end);
  	if (is_vm_hugetlb_page(vma))
  		pages = hugetlb_change_protection(vma, start, end, newprot);
  	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
  }
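
/*
 * Apply newflags to the [start, end) part of a single vma: charge the
 * commit for mappings that become privately writable, merge or split
 * vmas as needed, recompute vm_page_prot (enabling dirty accounting via
 * write-notify where wanted) and rewrite the page tables through
 * change_protection().
 */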
int
  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
  	unsigned long start, unsigned long end, unsigned long newflags)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long oldflags = vma->vm_flags;
  	long nrpages = (end - start) >> PAGE_SHIFT;
  	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;
  
  	if (newflags == oldflags) {
  		*pprev = vma;
  		return 0;
  	}
  
  	/*
  	 * If we make a private mapping writable we increase our commit;
  	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
  	 */
  	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
  				return -ENOMEM;
  			newflags |= VM_ACCOUNT;
  		}
  	}
  	/*
  	 * First try to merge with previous and/or next vma.
  	 */
  	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
  	*pprev = vma_merge(mm, *pprev, start, end, newflags,
  			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
  	if (*pprev) {
  		vma = *pprev;
  		goto success;
  	}
  
  	*pprev = vma;
  
  	if (start != vma->vm_start) {
  		error = split_vma(mm, vma, start, 1);
  		if (error)
  			goto fail;
  	}
  
  	if (end != vma->vm_end) {
  		error = split_vma(mm, vma, end, 0);
  		if (error)
  			goto fail;
  	}
  
  success:
  	/*
  	 * vm_flags and vm_page_prot are protected by the mmap_sem
  	 * held in write mode.
  	 */
  	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));
	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
  	return 0;
  
  fail:
  	vm_unacct_memory(charged);
  	return error;
  }
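
/*
 * The mprotect(2) system call: validate the request, then walk the vmas
 * covering [start, start+len) and apply mprotect_fixup() to each,
 * honouring PROT_GROWSDOWN/PROT_GROWSUP and the READ_IMPLIES_EXEC
 * personality.
 */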
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
  {
  	unsigned long vm_flags, nstart, end, tmp, reqprot;
  	struct vm_area_struct *vma, *prev;
  	int error = -EINVAL;
  	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
  	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
  	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
  		return -EINVAL;
  
  	if (start & ~PAGE_MASK)
  		return -EINVAL;
  	if (!len)
  		return 0;
  	len = PAGE_ALIGN(len);
  	end = start + len;
  	if (end <= start)
  		return -ENOMEM;
	if (!arch_validate_prot(prot))
  		return -EINVAL;
  
  	reqprot = prot;
  	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
  	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
  		prot |= PROT_EXEC;
  
  	vm_flags = calc_vm_prot_bits(prot);
  
  	down_write(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
  	error = -ENOMEM;
  	if (!vma)
  		goto out;
	prev = vma->vm_prev;
  	if (unlikely(grows & PROT_GROWSDOWN)) {
  		if (vma->vm_start >= end)
  			goto out;
  		start = vma->vm_start;
  		error = -EINVAL;
  		if (!(vma->vm_flags & VM_GROWSDOWN))
  			goto out;
	} else {
  		if (vma->vm_start > start)
  			goto out;
  		if (unlikely(grows & PROT_GROWSUP)) {
  			end = vma->vm_end;
  			error = -EINVAL;
  			if (!(vma->vm_flags & VM_GROWSUP))
  				goto out;
  		}
  	}
  	if (start > vma->vm_start)
  		prev = vma;
  
  	for (nstart = start ; ; ) {
  		unsigned long newflags;
		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY* into the place of VM_* */
  		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
  			error = -EACCES;
  			goto out;
  		}
  
  		error = security_file_mprotect(vma, reqprot, prot);
  		if (error)
  			goto out;
  
  		tmp = vma->vm_end;
  		if (tmp > end)
  			tmp = end;
  		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
  		if (error)
  			goto out;
  		nstart = tmp;
  
  		if (nstart < prev->vm_end)
  			nstart = prev->vm_end;
  		if (nstart >= end)
  			goto out;
  
  		vma = prev->vm_next;
  		if (!vma || vma->vm_start != nstart) {
  			error = -ENOMEM;
  			goto out;
  		}
  	}
  out:
  	up_write(&current->mm->mmap_sem);
  	return error;
  }