/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994  Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
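
/*
 * Fallback for architectures that do not define pgprot_modify(): an
 * architecture may override this to carry bits of the old protection
 * (e.g. caching attributes) over into the new one; this generic
 * version simply adopts the new protection wholesale.
 */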
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif
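
/*
 * Rewrite each present pte under one pmd with the new protection,
 * holding the page table lock and using lazy MMU mode so batching
 * architectures can coalesce the updates.  Write migration entries
 * are downgraded to read to stay safe.
 */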
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
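
/*
 * Walk the pmds under one pud, descending into change_pte_range()
 * for each pmd that is actually populated.
 */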
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}
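
/*
 * Walk the puds under one pgd, descending into change_pmd_range()
 * for each pud that is actually populated.
 */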
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
	} while (pud++, addr = next, addr != end);
}
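
/*
 * Top of the page-table walk: flush caches, rewrite every entry in
 * [addr, end) with the new protection, then flush the TLB for the
 * affected range.
 */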
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}
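
/*
 * Apply new protection flags to one vma (or part of it): charge any
 * newly writable private pages against the commit limit, merge or
 * split vmas as needed, then rewrite the page tables.  Called with
 * mmap_sem held for writing.
 */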
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.  hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));
	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	mmu_notifier_invalidate_range_end(mm, start, end);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
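
/*
 * sys_mprotect(): validate the arguments, find the vma(s) covering
 * [start, start+len), and apply the new protection to each one in
 * turn via mprotect_fixup().
 */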
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
		/* newflags >> 4 shifts VM_MAY% into place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		perf_event_mmap(vma);
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
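
/*
 * Illustrative userspace usage (a sketch, not part of this file):
 * map an anonymous page read-write, then drop write permission.
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(p, 4096, PROT_READ);	   (p is now read-only)
 */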