// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;

	/*
	 * Can be called with only the mmap_sem for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_sem is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_mapcount(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_cache(page) && PageDirty(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;

				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(vma->vm_mm, addr, pte, newpte);

				pages++;
			}

			if (is_write_device_private_entry(entry)) {
				pte_t newpte;

				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				make_device_private_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_pte_at(vma->vm_mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}
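
/*
 * change_pmd_range() has to tell three cases apart: a huge pmd that
 * covers the whole [addr, next) range is changed in place through
 * change_huge_pmd(); a huge pmd that is only partially covered is
 * split first and then handled pte by pte; a regular pmd falls through
 * to change_pte_range(). The mmu notifier start is deferred until the
 * first populated pmd is seen, so empty ranges pay no invalidation cost.
 */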
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);

	return pages;
}
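
/*
 * When every access permission is dropped on a VM_PFNMAP/VM_MIXEDMAP
 * mapping, some architectures must veto individual pfns (this backs
 * the x86 L1TF mitigation, which keeps unprivileged tasks from
 * creating PROT_NONE mappings of high MMIO pfns). The walk below only
 * checks pfn_modify_allowed() for each pte; it modifies nothing.
 */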
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, unsigned long newflags)
{
	pgprot_t new_pgprot = vm_get_page_prot(newflags);
	struct mm_walk prot_none_walk = {
		.pte_entry = prot_none_pte_entry,
		.hugetlb_entry = prot_none_hugetlb_entry,
		.test_walk = prot_none_test,
		.mm = current->mm,
		.private = &new_pgprot,
	};

	return walk_page_range(start, end, &prot_none_walk);
}
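
/*
 * mprotect_fixup() applies newflags to the range [start, end) of one
 * vma: it charges memory accounting if a private mapping becomes
 * writable, merges with neighbouring vmas or splits this one so the
 * range gets a vma of its own, then rewrites vm_flags/vm_page_prot and
 * finally calls change_protection(). A private VM_LOCKED vma that
 * turns writable is populated up front to avoid later COW faults.
 */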
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
		error = prot_none_walk(vma, start, end, newflags);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
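
/*
 * do_mprotect_pkey() validates the request and then walks the vmas
 * covering [start, start+len), applying mprotect_fixup() to each in
 * turn. A hole in the range aborts the loop with -ENOMEM, so a failed
 * call can leave the earlier part of the range already changed, which
 * is behaviour mprotect(2) explicitly permits.
 */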
/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC? */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}
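
/*
 * A minimal userspace sketch of the syscall above (illustrative only;
 * assumes <sys/mman.h> and a 4 KiB page size):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mprotect(p, 4096, PROT_READ) < 0)	// drop write permission
 *		perror("mprotect");
 *
 * start must be page-aligned and every page in the range must be
 * mapped, otherwise the checks in do_mprotect_pkey() fail the call.
 */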

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}
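
/*
 * A minimal userspace sketch of the pkey lifecycle (illustrative only;
 * assumes a libc that wraps these syscalls, e.g. glibc >= 2.27):
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	if (pkey < 0)
 *		perror("pkey_alloc");	// e.g. ENOSPC: no keys left
 *	pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *	...
 *	pkey_free(pkey);
 *
 * The access rights attached to an allocated key can then be changed
 * cheaply from userspace without further syscalls (WRPKRU on x86).
 */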

#endif /* CONFIG_ARCH_HAS_PKEYS */