mm/util.c

#include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/compiler.h>
  #include <linux/export.h>
  #include <linux/err.h>
  #include <linux/sched.h>
  #include <linux/security.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/mman.h>
  #include <linux/hugetlb.h>
  #include <linux/vmalloc.h>

  #include <asm/sections.h>
  #include <asm/uaccess.h>

  #include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
  {
  	return addr >= (unsigned long)__start_rodata &&
  		addr < (unsigned long)__end_rodata;
  }
  
  /**
   * kfree_const - conditionally free memory
   * @x: pointer to the memory
   *
 * Function calls kfree only if @x is not in the .rodata section.
   */
  void kfree_const(const void *x)
  {
  	if (!is_kernel_rodata((unsigned long)x))
  		kfree(x);
  }
  EXPORT_SYMBOL(kfree_const);

/**
   * kstrdup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrdup(const char *s, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strlen(s) + 1;
  	buf = kmalloc_track_caller(len, gfp);
  	if (buf)
  		memcpy(buf, s, len);
  	return buf;
  }
  EXPORT_SYMBOL(kstrdup);
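
/*
 * Usage sketch (illustrative; "name" is a hypothetical caller variable):
 * duplicate a string into a kmalloc'd buffer and release it with kfree():
 *
 *	char *copy = kstrdup(name, GFP_KERNEL);
 *
 *	if (!copy)
 *		return -ENOMEM;
 *	...
 *	kfree(copy);
 */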

  /**
   * kstrdup_const - conditionally duplicate an existing const string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   *
 * Function returns the source string if it is in the .rodata section;
 * otherwise it falls back to kstrdup().
   * Strings allocated by kstrdup_const should be freed by kfree_const.
   */
  const char *kstrdup_const(const char *s, gfp_t gfp)
  {
  	if (is_kernel_rodata((unsigned long)s))
  		return s;
  
  	return kstrdup(s, gfp);
  }
  EXPORT_SYMBOL(kstrdup_const);
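
/*
 * Usage sketch (illustrative; "tmpl" is a hypothetical caller structure):
 * kstrdup_const() pairs with kfree_const(), which is a no-op for .rodata
 * pointers that were returned without duplication:
 *
 *	const char *name = kstrdup_const(tmpl->name, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */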
  
  /**
   * kstrndup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @max: read at most @max chars from @s
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrndup(const char *s, size_t max, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strnlen(s, max);
  	buf = kmalloc_track_caller(len+1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kstrndup);
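
/*
 * Usage sketch (illustrative; "long_name" is hypothetical): copy at most
 * 15 characters; kstrndup() always appends a terminating NUL, so the
 * allocation here is at most 16 bytes:
 *
 *	char *short_name = kstrndup(long_name, 15, GFP_KERNEL);
 */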
  
  /**
   * kmemdup - duplicate region of memory
   *
   * @src: memory region to duplicate
   * @len: memory region length
   * @gfp: GFP mask to use
   */
  void *kmemdup(const void *src, size_t len, gfp_t gfp)
  {
  	void *p;

	p = kmalloc_track_caller(len, gfp);
  	if (p)
  		memcpy(p, src, len);
  	return p;
  }
  EXPORT_SYMBOL(kmemdup);
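
/*
 * Usage sketch (illustrative; "src" and "len" are a hypothetical caller's
 * buffer): duplicate an arbitrary memory region, which unlike kstrdup()
 * need not be NUL-terminated:
 *
 *	void *copy = kmemdup(src, len, GFP_KERNEL);
 *
 *	if (!copy)
 *		return -ENOMEM;
 */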

/**
   * memdup_user - duplicate memory region from user space
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Returns an ERR_PTR() on failure.
   */
  void *memdup_user(const void __user *src, size_t len)
  {
  	void *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause pagefault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user);
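
/*
 * Usage sketch (illustrative; "ubuf" and "count" are hypothetical): unlike
 * the kstrdup()/kmemdup() family, failure is reported via ERR_PTR(), so
 * callers must test with IS_ERR() rather than for NULL:
 *
 *	void *kbuf = memdup_user(ubuf, count);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	...
 *	kfree(kbuf);
 */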

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Returns an ERR_PTR() on failure.
 */
  char *strndup_user(const char __user *s, long n)
  {
  	char *p;
  	long length;
  
  	length = strnlen_user(s, n);
  
  	if (!length)
  		return ERR_PTR(-EFAULT);
  
  	if (length > n)
  		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

  	if (IS_ERR(p))
  		return p;
  
  	p[length - 1] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(strndup_user);
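
/*
 * Usage sketch (illustrative; "upath" is hypothetical): copy a
 * user-supplied path, bounding the copy at PATH_MAX including the
 * trailing NUL:
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 */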

  /**
   * memdup_user_nul - duplicate memory region from user space and NUL-terminate
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Returns an ERR_PTR() on failure.
   */
  void *memdup_user_nul(const void __user *src, size_t len)
  {
  	char *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause pagefault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  	p[len] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user_nul);
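
/*
 * Usage sketch (illustrative; "ubuf", "count" and "val" are hypothetical):
 * a common pattern in procfs/debugfs write handlers, where the copied
 * buffer is then parsed as a string:
 *
 *	char *kbuf = memdup_user_nul(ubuf, count);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = kstrtoint(strstrip(kbuf), 0, &val);
 *	kfree(kbuf);
 */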

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent)
  {
  	struct vm_area_struct *next;
  
  	vma->vm_prev = prev;
  	if (prev) {
  		next = prev->vm_next;
  		prev->vm_next = vma;
  	} else {
  		mm->mmap = vma;
  		if (rb_parent)
  			next = rb_entry(rb_parent,
  					struct vm_area_struct, vm_rb);
  		else
  			next = NULL;
  	}
  	vma->vm_next = next;
  	if (next)
  		next->vm_prev = vma;
  }

/* Check if the vma is being used as a stack by this task */
  int vma_is_stack_for_current(struct vm_area_struct *vma)
  {
  	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
  }

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
  void arch_pick_mmap_layout(struct mm_struct *mm)
  {
  	mm->mmap_base = TASK_UNMAPPED_BASE;
  	mm->get_unmapped_area = arch_get_unmapped_area;
  }
  #endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
  int __weak __get_user_pages_fast(unsigned long start,
  				 int nr_pages, int write, struct page **pages)
  {
  	return 0;
  }
  EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
   * get_user_pages_fast() - pin user pages in memory
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @write:	whether pages will be written to
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long.
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno.
   *
   * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
   *
   * get_user_pages_fast may take mmap_sem and page table locks, so no
   * assumptions can be made about lack of locking. get_user_pages_fast is to be
   * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
   * callers need to carefully consider what to use. On many architectures,
   * get_user_pages_fast simply falls back to get_user_pages.
   */
  int __weak get_user_pages_fast(unsigned long start,
  				int nr_pages, int write, struct page **pages)
  {
  	return get_user_pages_unlocked(start, nr_pages, pages,
  				       write ? FOLL_WRITE : 0);
  }
  EXPORT_SYMBOL_GPL(get_user_pages_fast);
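
/*
 * Usage sketch (illustrative; "pages" is a caller-allocated array of at
 * least npages entries): pin user pages for writing, then drop each
 * reference with put_page() once done:
 *
 *	nr = get_user_pages_fast(start, npages, 1, pages);
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */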

  unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long pgoff)
  {
  	unsigned long ret;
  	struct mm_struct *mm = current->mm;
  	unsigned long populate;
  
  	ret = security_mmap_file(file, prot, flag);
  	if (!ret) {
  		if (down_write_killable(&mm->mmap_sem))
  			return -EINTR;
  		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
  				    &populate);
  		up_write(&mm->mmap_sem);
  		if (populate)
  			mm_populate(ret, populate);
  	}
  	return ret;
  }
  
  unsigned long vm_mmap(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long offset)
  {
  	if (unlikely(offset + PAGE_ALIGN(len) < offset))
  		return -EINVAL;
  	if (unlikely(offset_in_page(offset)))
  		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
  }
  EXPORT_SYMBOL(vm_mmap);
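
/*
 * Usage sketch (illustrative; "file" and "size" are hypothetical): map a
 * file read-only from kernel code. On failure the return value encodes a
 * negative errno, so test it with IS_ERR_VALUE():
 *
 *	unsigned long addr = vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, 0);
 *
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 */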

void kvfree(const void *addr)
  {
  	if (is_vmalloc_addr(addr))
  		vfree(addr);
  	else
  		kfree(addr);
  }
  EXPORT_SYMBOL(kvfree);
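
/*
 * Usage sketch (illustrative): kvfree() pairs with the common "try
 * kmalloc(), fall back to vmalloc()" allocation pattern, so the caller
 * need not remember which allocator succeeded:
 *
 *	p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *	if (!p)
 *		p = vmalloc(size);
 *	...
 *	kvfree(p);
 */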

static inline void *__page_rmapping(struct page *page)
  {
  	unsigned long mapping;
  
  	mapping = (unsigned long)page->mapping;
  	mapping &= ~PAGE_MAPPING_FLAGS;
  
  	return (void *)mapping;
  }
  
  /* Neutral page->mapping pointer to address_space or anon_vma or other */
  void *page_rmapping(struct page *page)
  {
  	page = compound_head(page);
  	return __page_rmapping(page);
  }

/*
   * Return true if this page is mapped into pagetables.
 * For a compound page it returns true if any subpage of the compound page
 * is mapped.
   */
  bool page_mapped(struct page *page)
  {
  	int i;
  
  	if (likely(!PageCompound(page)))
  		return atomic_read(&page->_mapcount) >= 0;
  	page = compound_head(page);
  	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
  		return true;
  	if (PageHuge(page))
  		return false;
  	for (i = 0; i < hpage_nr_pages(page); i++) {
  		if (atomic_read(&page[i]._mapcount) >= 0)
  			return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
  {
  	unsigned long mapping;
  
  	page = compound_head(page);
  	mapping = (unsigned long)page->mapping;
  	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
  		return NULL;
  	return __page_rmapping(page);
  }

struct address_space *page_mapping(struct page *page)
  {
  	struct address_space *mapping;
  
  	page = compound_head(page);

  	/* This happens if someone calls flush_dcache_page on slab page */
  	if (unlikely(PageSlab(page)))
  		return NULL;

	if (unlikely(PageSwapCache(page))) {
  		swp_entry_t entry;
  
  		entry.val = page_private(page);
  		return swap_address_space(entry);
  	}

	mapping = page->mapping;
  	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
  		return NULL;
  
  	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
  }
  EXPORT_SYMBOL(page_mapping);

  /* Slow path of page_mapcount() for compound pages */
  int __page_mapcount(struct page *page)
  {
  	int ret;
  
  	ret = atomic_read(&page->_mapcount) + 1;
  	/*
  	 * For file THP page->_mapcount contains total number of mapping
  	 * of the page: no need to look into compound_mapcount.
  	 */
  	if (!PageAnon(page) && !PageHuge(page))
  		return ret;
  	page = compound_head(page);
  	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
  	if (PageDoubleMap(page))
  		ret--;
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
  int sysctl_overcommit_ratio __read_mostly = 50;
  unsigned long sysctl_overcommit_kbytes __read_mostly;
  int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
  unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
  unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
  			     void __user *buffer, size_t *lenp,
  			     loff_t *ppos)
  {
  	int ret;
  
  	ret = proc_dointvec(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		sysctl_overcommit_kbytes = 0;
  	return ret;
  }
  
  int overcommit_kbytes_handler(struct ctl_table *table, int write,
  			     void __user *buffer, size_t *lenp,
  			     loff_t *ppos)
  {
  	int ret;
  
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		sysctl_overcommit_ratio = 0;
  	return ret;
  }

/*
   * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
   */
  unsigned long vm_commit_limit(void)
  {
  	unsigned long allowed;
  
  	if (sysctl_overcommit_kbytes)
  		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
  	else
  		allowed = ((totalram_pages - hugetlb_total_pages())
  			   * sysctl_overcommit_ratio / 100);
  	allowed += total_swap_pages;
  
  	return allowed;
  }
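
/*
 * Worked example (illustrative): with sysctl_overcommit_kbytes unset,
 * sysctl_overcommit_ratio = 50, 4 GiB of RAM (1048576 pages of 4 KiB, no
 * hugetlb pages) and 1 GiB of swap (262144 pages), the limit is
 * 1048576 * 50 / 100 + 262144 = 786432 pages, i.e. 3 GiB.
 */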

/*
 * Make sure vm_committed_as is in one cacheline and not shared with
 * other variables, as it can be updated frequently by several CPUs.
   */
  struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
  
  /*
 * The global memory commitment made in the system is a metric that can
 * be used to drive ballooning decisions when Linux is hosted
   * as a guest. On Hyper-V, the host implements a policy engine for dynamically
   * balancing memory across competing virtual machines that are hosted.
   * Several metrics drive this policy engine including the guest reported
   * memory commitment.
   */
  unsigned long vm_memory_committed(void)
  {
  	return percpu_counter_read_positive(&vm_committed_as);
  }
  EXPORT_SYMBOL_GPL(vm_memory_committed);
  
  /*
   * Check that a process has enough memory to allocate a new virtual
   * mapping. 0 means there is enough memory for the allocation to
   * succeed and -ENOMEM implies there is not.
   *
   * We currently support three overcommit policies, which are set via the
   * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
   *
   * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
   * Additional code 2002 Jul 20 by Robert Love.
   *
   * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
   *
   * Note this is a helper function intended to be used by LSMs which
   * wish to use this logic.
   */
  int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
  {
  	long free, allowed, reserve;
  
  	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
  			-(s64)vm_committed_as_batch * num_online_cpus(),
  			"memory commitment underflow");
  
  	vm_acct_memory(pages);
  
  	/*
  	 * Sometimes we want to use more memory than we have
  	 */
  	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
  		return 0;
  
  	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
  		free = global_page_state(NR_FREE_PAGES);
  		free += global_node_page_state(NR_FILE_PAGES);
  
  		/*
		 * shmem pages shouldn't be counted as free in this
		 * case: they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
  		 */
  		free -= global_node_page_state(NR_SHMEM);
  
  		free += get_nr_swap_pages();
  
  		/*
  		 * Any slabs which are created with the
  		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
  		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this category.
  		 */
  		free += global_page_state(NR_SLAB_RECLAIMABLE);
  
  		/*
		 * Leave reserved pages; they are not available for anonymous pages.
  		 */
  		if (free <= totalreserve_pages)
  			goto error;
  		else
  			free -= totalreserve_pages;
  
  		/*
  		 * Reserve some for root
  		 */
  		if (!cap_sys_admin)
  			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
  
  		if (free > pages)
  			return 0;
  
  		goto error;
  	}
  
  	allowed = vm_commit_limit();
  	/*
  	 * Reserve some for root
  	 */
  	if (!cap_sys_admin)
  		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
  
  	/*
  	 * Don't let a single process grow so big a user can't recover
  	 */
  	if (mm) {
  		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
  		allowed -= min_t(long, mm->total_vm / 32, reserve);
  	}
  
  	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
  		return 0;
  error:
  	vm_unacct_memory(pages);
  
  	return -ENOMEM;
  }
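
/*
 * Worked example (illustrative): with 4 KiB pages, the default
 * sysctl_user_reserve_kbytes of 128 MiB becomes 131072 >> 2 = 32768 pages,
 * so under OVERCOMMIT_NEVER a process with 256 MiB of total_vm (65536
 * pages) subtracts min(65536 / 32, 32768) = 2048 pages from its limit.
 */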

/**
   * get_cmdline() - copy the cmdline value to a buffer.
   * @task:     the task whose cmdline value to copy.
   * @buffer:   the buffer to copy to.
   * @buflen:   the length of the buffer. Larger cmdline values are truncated
   *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
   */
  int get_cmdline(struct task_struct *task, char *buffer, int buflen)
  {
  	int res = 0;
  	unsigned int len;
  	struct mm_struct *mm = get_task_mm(task);
  	unsigned long arg_start, arg_end, env_start, env_end;
  	if (!mm)
  		goto out;
  	if (!mm->arg_end)
  		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
  	arg_start = mm->arg_start;
  	arg_end = mm->arg_end;
  	env_start = mm->env_start;
  	env_end = mm->env_end;
  	up_read(&mm->mmap_sem);
  
  	len = arg_end - arg_start;
  
  	if (len > buflen)
  		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
  
  	/*
  	 * If the nul at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
  	 */
  	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
  		len = strnlen(buffer, res);
  		if (len < res) {
  			res = len;
  		} else {
  			len = env_end - env_start;
  			if (len > buflen - res)
  				len = buflen - res;
  			res += access_process_vm(task, env_start,
  						 buffer+res, len,
  						 FOLL_FORCE);
  			res = strnlen(buffer, res);
  		}
  	}
  out_mm:
  	mmput(mm);
  out:
  	return res;
  }