mm/util.c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
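
/*
 * Usage sketch (illustrative only, not part of mm/util.c): a caller whose
 * name is usually a string literal can pair kstrdup_const() with
 * kfree_const(); the "example_" struct and function are hypothetical.
 */
struct example_obj {
	const char *name;
};

static int example_set_name(struct example_obj *obj, const char *name)
{
	const char *copy = kstrdup_const(name, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;
	kfree_const(obj->name);	/* safe for both .rodata and heap strings */
	obj->name = copy;
	return 0;
}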

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
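
/*
 * Usage sketch (illustrative only, not part of mm/util.c): kstrndup()
 * bounds the copy, which helps when the source may not be NUL-terminated
 * within a known limit; the helper below is hypothetical.
 */
static char *example_copy_token(const char *src)
{
	/* copies at most 15 bytes and always appends a terminator */
	return kstrndup(src, 15, GFP_KERNEL);
}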

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
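
/*
 * Usage sketch (illustrative only, not part of mm/util.c): an ioctl-style
 * handler can pull a fixed-size argument block from user space with
 * memdup_user(); the struct and function names are hypothetical.
 */
struct example_args {
	u32 flags;
	u64 addr;
};

static long example_ioctl_copy(const void __user *uarg)
{
	struct example_args *args;

	args = memdup_user(uarg, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);	/* -ENOMEM or -EFAULT */

	/* ... use args->flags and args->addr ... */
	kfree(args);
	return 0;
}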

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
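
/*
 * Usage sketch (illustrative only, not part of mm/util.c): a helper that
 * takes an untrusted user pointer and bounds the copy; PATH_MAX and the
 * function name are only for illustration.
 */
static long example_take_path(const char __user *upath)
{
	char *path = strndup_user(upath, PATH_MAX);

	if (IS_ERR(path))
		return PTR_ERR(path);	/* -EFAULT or -EINVAL */

	/* ... use the NUL-terminated copy ... */
	kfree(path);
	return 0;
}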

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
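
/*
 * Usage sketch (illustrative only, not part of mm/util.c): a procfs-style
 * write handler often wants the user buffer as a C string;
 * memdup_user_nul() does the copy and the termination in one step.
 */
static ssize_t example_store(const char __user *ubuf, size_t count)
{
	char *cmd = memdup_user_nul(ubuf, count);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... parse cmd, e.g. with sysfs_streq() or kstrtoul() ... */
	kfree(cmd);
	return count;
}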

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
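
/*
 * Usage sketch (illustrative only, not part of mm/util.c): pinning a
 * single user page for writing; the helper name is hypothetical, and with
 * this kernel's API the pinned page is released via put_page().
 */
static int example_pin_one_page(unsigned long uaddr)
{
	struct page *page;
	int ret;

	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, &page);
	if (ret < 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the page, e.g. via kmap() ... */
	put_page(page);
	return 0;
}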

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
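
/*
 * Usage sketch (illustrative only, not part of mm/util.c): mapping the
 * first page of a file read-only into the current process, similar to how
 * binfmt loaders use vm_mmap(); error values come back encoded in the
 * address, hence the IS_ERR_VALUE() check.
 */
static unsigned long example_map_first_page(struct file *filp)
{
	unsigned long addr;

	addr = vm_mmap(filp, 0, PAGE_SIZE, PROT_READ, MAP_PRIVATE, 0);
	if (IS_ERR_VALUE(addr))
		pr_warn("example mapping failed: %ld\n", (long)addr);
	return addr;
}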

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
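
/*
 * Usage sketch (illustrative only, not part of mm/util.c): kvfree() pairs
 * naturally with the "try kmalloc, fall back to vmalloc" pattern, since it
 * picks the right release path from the address alone.
 */
static void *example_alloc_table(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);
	return p;	/* release with kvfree(p), whichever path succeeded */
}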

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;
	return mapping;
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
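
/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_SHIFT is 12, so
 * "kbytes >> (PAGE_SHIFT - 10)" divides by 4 to convert kilobytes into
 * pages. Setting sysctl_overcommit_kbytes to 1048576 (1 GiB) therefore
 * yields a base limit of 262144 pages before total_swap_pages is added.
 */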

/*
 * Make sure vm_committed_as is in its own cacheline, not shared with
 * other variables: it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine, including the guest-reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
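
/*
 * Worked example (illustrative): with the default
 * sysctl_user_reserve_kbytes of 128 MiB and 4 KiB pages, a process whose
 * total_vm is 2 GiB (524288 pages) has min(524288 / 32, 32768) = 16384
 * pages (64 MiB) subtracted from its commit limit, so smaller processes
 * give up proportionally less than the full reserve.
 */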

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, 0);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
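
/*
 * Usage sketch (illustrative only, not part of mm/util.c): dumping a
 * task's command line into a fixed buffer. Because the copy is not
 * guaranteed to be NUL-terminated, the caller terminates it by hand;
 * the helper name is hypothetical.
 */
static void example_print_cmdline(struct task_struct *task)
{
	char buf[256];
	int len = get_cmdline(task, buf, sizeof(buf) - 1);

	buf[len] = '\0';	/* len is at most sizeof(buf) - 1 */
	pr_info("cmdline: %s\n", buf);
}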