Blame view

mm/mprotect.c 7.63 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
5
6
  /*
   *  mm/mprotect.c
   *
   *  (C) Copyright 1994 Linus Torvalds
   *  (C) Copyright 2002 Christoph Hellwig
   *
046c68842   Alan Cox   mm: update my add...
7
   *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
8
9
10
11
12
   *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
   */
  
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
13
14
15
16
17
18
19
20
  #include <linux/shm.h>
  #include <linux/mman.h>
  #include <linux/fs.h>
  #include <linux/highmem.h>
  #include <linux/security.h>
  #include <linux/mempolicy.h>
  #include <linux/personality.h>
  #include <linux/syscalls.h>
0697212a4   Christoph Lameter   [PATCH] Swapless ...
21
22
  #include <linux/swap.h>
  #include <linux/swapops.h>
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
23
  #include <linux/mmu_notifier.h>
64cdd548f   KOSAKI Motohiro   mm: cleanup: remo...
24
  #include <linux/migrate.h>
cdd6c482c   Ingo Molnar   perf: Do the big ...
25
  #include <linux/perf_event.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
26
27
28
29
  #include <asm/uaccess.h>
  #include <asm/pgtable.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
1c12c4cf9   Venki Pallipadi   mprotect: prevent...
30
31
32
33
34
35
#ifndef pgprot_modify
/*
 * Fallback for architectures that do not provide their own pgprot_modify():
 * simply adopt the new protection bits wholesale, preserving nothing from
 * the old pgprot.  Architectures with special PTE bits (e.g. x86 PAT bits)
 * override this to carry those bits over from @oldprot.
 */
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
36
  static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
37
38
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
39
  {
0697212a4   Christoph Lameter   [PATCH] Swapless ...
40
  	pte_t *pte, oldpte;
705e87c0c   Hugh Dickins   [PATCH] mm: pte_o...
41
  	spinlock_t *ptl;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
42

705e87c0c   Hugh Dickins   [PATCH] mm: pte_o...
43
  	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
6606c3e0d   Zachary Amsden   [PATCH] paravirt:...
44
  	arch_enter_lazy_mmu_mode();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
45
  	do {
0697212a4   Christoph Lameter   [PATCH] Swapless ...
46
47
  		oldpte = *pte;
  		if (pte_present(oldpte)) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
48
  			pte_t ptent;
1ea0704e0   Jeremy Fitzhardinge   mm: add a ptep_mo...
49
  			ptent = ptep_modify_prot_start(mm, addr, pte);
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
50
  			ptent = pte_modify(ptent, newprot);
1ea0704e0   Jeremy Fitzhardinge   mm: add a ptep_mo...
51

c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
52
53
54
55
56
57
  			/*
  			 * Avoid taking write faults for pages we know to be
  			 * dirty.
  			 */
  			if (dirty_accountable && pte_dirty(ptent))
  				ptent = pte_mkwrite(ptent);
1ea0704e0   Jeremy Fitzhardinge   mm: add a ptep_mo...
58
59
  
  			ptep_modify_prot_commit(mm, addr, pte, ptent);
64cdd548f   KOSAKI Motohiro   mm: cleanup: remo...
60
  		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
0697212a4   Christoph Lameter   [PATCH] Swapless ...
61
62
63
64
65
66
67
68
69
70
71
  			swp_entry_t entry = pte_to_swp_entry(oldpte);
  
  			if (is_write_migration_entry(entry)) {
  				/*
  				 * A protection check is difficult so
  				 * just be safe and disable write
  				 */
  				make_migration_entry_read(&entry);
  				set_pte_at(mm, addr, pte,
  					swp_entry_to_pte(entry));
  			}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
72
73
  		}
  	} while (pte++, addr += PAGE_SIZE, addr != end);
6606c3e0d   Zachary Amsden   [PATCH] paravirt:...
74
  	arch_leave_lazy_mmu_mode();
705e87c0c   Hugh Dickins   [PATCH] mm: pte_o...
75
  	pte_unmap_unlock(pte - 1, ptl);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
76
77
78
  }
  
  static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
79
80
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
81
82
83
84
85
86
87
88
89
  {
  	pmd_t *pmd;
  	unsigned long next;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_none_or_clear_bad(pmd))
  			continue;
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
90
  		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
91
92
93
94
  	} while (pmd++, addr = next, addr != end);
  }
  
  static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
95
96
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
97
98
99
100
101
102
103
104
105
  {
  	pud_t *pud;
  	unsigned long next;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud))
  			continue;
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
106
  		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
107
108
109
110
  	} while (pud++, addr = next, addr != end);
  }
  
  static void change_protection(struct vm_area_struct *vma,
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
111
112
  		unsigned long addr, unsigned long end, pgprot_t newprot,
  		int dirty_accountable)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
113
114
115
116
117
118
119
120
121
  {
  	struct mm_struct *mm = vma->vm_mm;
  	pgd_t *pgd;
  	unsigned long next;
  	unsigned long start = addr;
  
  	BUG_ON(addr >= end);
  	pgd = pgd_offset(mm, addr);
  	flush_cache_range(vma, addr, end);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
122
123
124
125
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd))
  			continue;
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
126
  		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
127
128
  	} while (pgd++, addr = next, addr != end);
  	flush_tlb_range(vma, start, end);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
129
  }
b6a2fea39   Ollie Wild   mm: variable leng...
130
  int
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
131
132
133
134
135
136
137
  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
  	unsigned long start, unsigned long end, unsigned long newflags)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long oldflags = vma->vm_flags;
  	long nrpages = (end - start) >> PAGE_SHIFT;
  	unsigned long charged = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
138
139
  	pgoff_t pgoff;
  	int error;
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
140
  	int dirty_accountable = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
141
142
143
144
145
146
147
148
149
  
  	if (newflags == oldflags) {
  		*pprev = vma;
  		return 0;
  	}
  
  	/*
  	 * If we make a private mapping writable we increase our commit;
  	 * but (without finer accounting) cannot reduce our commit if we
5a6fe1259   Mel Gorman   Do not account fo...
150
151
  	 * make it unwritable again. hugetlb mapping were accounted for
  	 * even if read-only so there is no need to account for them here
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
152
153
  	 */
  	if (newflags & VM_WRITE) {
5a6fe1259   Mel Gorman   Do not account fo...
154
  		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
cdfd4325c   Andy Whitcroft   mm: record MAP_NO...
155
  						VM_SHARED|VM_NORESERVE))) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
156
157
158
159
160
161
  			charged = nrpages;
  			if (security_vm_enough_memory(charged))
  				return -ENOMEM;
  			newflags |= VM_ACCOUNT;
  		}
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
  	/*
  	 * First try to merge with previous and/or next vma.
  	 */
  	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
  	*pprev = vma_merge(mm, *pprev, start, end, newflags,
  			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
  	if (*pprev) {
  		vma = *pprev;
  		goto success;
  	}
  
  	*pprev = vma;
  
  	if (start != vma->vm_start) {
  		error = split_vma(mm, vma, start, 1);
  		if (error)
  			goto fail;
  	}
  
  	if (end != vma->vm_end) {
  		error = split_vma(mm, vma, end, 0);
  		if (error)
  			goto fail;
  	}
  
  success:
  	/*
  	 * vm_flags and vm_page_prot are protected by the mmap_sem
  	 * held in write mode.
  	 */
  	vma->vm_flags = newflags;
1c12c4cf9   Venki Pallipadi   mprotect: prevent...
193
194
  	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
  					  vm_get_page_prot(newflags));
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
195
  	if (vma_wants_writenotify(vma)) {
1ddd439ef   Hugh Dickins   fix mprotect vma_...
196
  		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
197
198
  		dirty_accountable = 1;
  	}
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
199

cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
200
  	mmu_notifier_invalidate_range_start(mm, start, end);
8f860591f   Zhang, Yanmin   [PATCH] Enable mp...
201
  	if (is_vm_hugetlb_page(vma))
d08b3851d   Peter Zijlstra   [PATCH] mm: track...
202
  		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
8f860591f   Zhang, Yanmin   [PATCH] Enable mp...
203
  	else
c1e6098b2   Peter Zijlstra   [PATCH] mm: optim...
204
  		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
cddb8a5c1   Andrea Arcangeli   mmu-notifiers: core
205
  	mmu_notifier_invalidate_range_end(mm, start, end);
ab50b8ed8   Hugh Dickins   [PATCH] mm: vm_st...
206
207
  	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
  	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
208
209
210
211
212
213
  	return 0;
  
  fail:
  	vm_unacct_memory(charged);
  	return error;
  }
6a6160a7b   Heiko Carstens   [CVE-2009-0029] S...
214
215
  SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
  		unsigned long, prot)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
  {
  	unsigned long vm_flags, nstart, end, tmp, reqprot;
  	struct vm_area_struct *vma, *prev;
  	int error = -EINVAL;
  	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
  	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
  	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
  		return -EINVAL;
  
  	if (start & ~PAGE_MASK)
  		return -EINVAL;
  	if (!len)
  		return 0;
  	len = PAGE_ALIGN(len);
  	end = start + len;
  	if (end <= start)
  		return -ENOMEM;
b845f313d   Dave Kleikamp   mm: Allow archite...
233
  	if (!arch_validate_prot(prot))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
234
235
236
237
238
239
  		return -EINVAL;
  
  	reqprot = prot;
  	/*
  	 * Does the application expect PROT_READ to imply PROT_EXEC:
  	 */
b344e05c5   Hua Zhong   [PATCH] likely cl...
240
  	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
  		prot |= PROT_EXEC;
  
  	vm_flags = calc_vm_prot_bits(prot);
  
  	down_write(&current->mm->mmap_sem);
  
  	vma = find_vma_prev(current->mm, start, &prev);
  	error = -ENOMEM;
  	if (!vma)
  		goto out;
  	if (unlikely(grows & PROT_GROWSDOWN)) {
  		if (vma->vm_start >= end)
  			goto out;
  		start = vma->vm_start;
  		error = -EINVAL;
  		if (!(vma->vm_flags & VM_GROWSDOWN))
  			goto out;
  	}
  	else {
  		if (vma->vm_start > start)
  			goto out;
  		if (unlikely(grows & PROT_GROWSUP)) {
  			end = vma->vm_end;
  			error = -EINVAL;
  			if (!(vma->vm_flags & VM_GROWSUP))
  				goto out;
  		}
  	}
  	if (start > vma->vm_start)
  		prev = vma;
  
  	for (nstart = start ; ; ) {
  		unsigned long newflags;
  
  		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
276
  		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
7e2cff42c   Paolo 'Blaisorblade' Giarrusso   [PATCH] mm: add a...
277
278
  		/* newflags >> 4 shift VM_MAY% in place of VM_% */
  		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
279
280
281
282
283
284
285
286
287
288
289
290
291
292
  			error = -EACCES;
  			goto out;
  		}
  
  		error = security_file_mprotect(vma, reqprot, prot);
  		if (error)
  			goto out;
  
  		tmp = vma->vm_end;
  		if (tmp > end)
  			tmp = end;
  		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
  		if (error)
  			goto out;
cdd6c482c   Ingo Molnar   perf: Do the big ...
293
  		perf_event_mmap(vma);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
  		nstart = tmp;
  
  		if (nstart < prev->vm_end)
  			nstart = prev->vm_end;
  		if (nstart >= end)
  			goto out;
  
  		vma = prev->vm_next;
  		if (!vma || vma->vm_start != nstart) {
  			error = -ENOMEM;
  			goto out;
  		}
  	}
  out:
  	up_write(&current->mm->mmap_sem);
  	return error;
  }