mm/util.c

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

  static inline int is_kernel_rodata(unsigned long addr)
  {
  	return addr >= (unsigned long)__start_rodata &&
  		addr < (unsigned long)__end_rodata;
  }
  
  /**
   * kfree_const - conditionally free memory
   * @x: pointer to the memory
   *
 * Calls kfree() only if @x is not in the .rodata section.
   */
  void kfree_const(const void *x)
  {
  	if (!is_kernel_rodata((unsigned long)x))
  		kfree(x);
  }
  EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrdup(const char *s, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
  	if (buf)
  		memcpy(buf, s, len);
  	return buf;
  }
  EXPORT_SYMBOL(kstrdup);

  /**
   * kstrdup_const - conditionally duplicate an existing const string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   *
 * Returns the source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup().
   * Strings allocated by kstrdup_const should be freed by kfree_const.
   */
  const char *kstrdup_const(const char *s, gfp_t gfp)
  {
  	if (is_kernel_rodata((unsigned long)s))
  		return s;
  
  	return kstrdup(s, gfp);
  }
  EXPORT_SYMBOL(kstrdup_const);
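
/*
 * Usage sketch (illustrative only, not part of mm/util.c; struct my_attr
 * and my_attr_set_name() are hypothetical) showing the intended
 * kstrdup_const()/kfree_const() pairing:
 *
 *	struct my_attr {
 *		const char *name;
 *	};
 *
 *	static int my_attr_set_name(struct my_attr *attr, const char *name)
 *	{
 *		const char *tmp = kstrdup_const(name, GFP_KERNEL);
 *
 *		if (!tmp)
 *			return -ENOMEM;
 *		kfree_const(attr->name);	// safe for heap and .rodata strings
 *		attr->name = tmp;
 *		return 0;
 *	}
 */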
  
  /**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
   */
  char *kstrndup(const char *s, size_t max, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strnlen(s, max);
  	buf = kmalloc_track_caller(len+1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kstrndup);
  
  /**
   * kmemdup - duplicate region of memory
   *
   * @src: memory region to duplicate
   * @len: memory region length
   * @gfp: GFP mask to use
   */
  void *kmemdup(const void *src, size_t len, gfp_t gfp)
  {
  	void *p;
	p = kmalloc_track_caller(len, gfp);
  	if (p)
  		memcpy(p, src, len);
  	return p;
  }
  EXPORT_SYMBOL(kmemdup);
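
/*
 * Usage sketch (illustrative only, not part of mm/util.c; struct my_config
 * and my_save_config() are hypothetical): duplicating a caller-owned
 * buffer with kmemdup(); the copy is later released with kfree().
 *
 *	static struct my_config *my_save_config(const struct my_config *src)
 *	{
 *		return kmemdup(src, sizeof(*src), GFP_KERNEL);
 *	}
 */
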
/**
   * kmemdup_nul - Create a NUL-terminated string from unterminated data
   * @s: The data to stringify
   * @len: The size of the data
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
  {
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	buf = kmalloc_track_caller(len + 1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kmemdup_nul);
  
  /**
   * memdup_user - duplicate memory region from user space
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Returns an ERR_PTR() on failure.
   */
  void *memdup_user(const void __user *src, size_t len)
  {
  	void *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause pagefault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user);
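
/*
 * Usage sketch (illustrative only, not part of mm/util.c; the ioctl-style
 * helper below is hypothetical): copy a user buffer and propagate the
 * encoded error with IS_ERR()/PTR_ERR().
 *
 *	static long my_set_blob(void __user *uptr, size_t len)
 *	{
 *		void *buf = memdup_user(uptr, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);	// -ENOMEM or -EFAULT
 *		// ... consume buf ...
 *		kfree(buf);
 *		return 0;
 *	}
 */
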
/*
 * strndup_user - duplicate an existing string from user space
   * @s: The string to duplicate
   * @n: Maximum number of bytes to copy, including the trailing NUL.
   */
  char *strndup_user(const char __user *s, long n)
  {
  	char *p;
  	long length;
  
  	length = strnlen_user(s, n);
  
  	if (!length)
  		return ERR_PTR(-EFAULT);
  
  	if (length > n)
  		return ERR_PTR(-EINVAL);
	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;
  
  	p[length - 1] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(strndup_user);

  /**
   * memdup_user_nul - duplicate memory region from user space and NUL-terminate
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Returns an ERR_PTR() on failure.
   */
  void *memdup_user_nul(const void __user *src, size_t len)
  {
  	char *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause pagefault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  	p[len] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user_nul);
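
/*
 * Usage sketch (illustrative only, not part of mm/util.c; my_write() and
 * my_parse() are hypothetical): a write handler that wants a
 * NUL-terminated kernel copy of user data before parsing it.
 *
 *	static ssize_t my_write(struct file *file, const char __user *ubuf,
 *				size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *		int err;
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		err = my_parse(kbuf);	// kbuf[count] == '\0' is guaranteed
 *		kfree(kbuf);
 *		return err ? err : count;
 *	}
 */
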
  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent)
  {
  	struct vm_area_struct *next;
  
  	vma->vm_prev = prev;
  	if (prev) {
  		next = prev->vm_next;
  		prev->vm_next = vma;
  	} else {
  		mm->mmap = vma;
  		if (rb_parent)
  			next = rb_entry(rb_parent,
  					struct vm_area_struct, vm_rb);
  		else
  			next = NULL;
  	}
  	vma->vm_next = next;
  	if (next)
  		next->vm_prev = vma;
  }
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;
  	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
  }
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
  void arch_pick_mmap_layout(struct mm_struct *mm)
  {
  	mm->mmap_base = TASK_UNMAPPED_BASE;
  	mm->get_unmapped_area = arch_get_unmapped_area;
  }
  #endif

  /*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
  				 int nr_pages, int write, struct page **pages)
  {
  	return 0;
  }
  EXPORT_SYMBOL_GPL(__get_user_pages_fast);
  /**
   * get_user_pages_fast() - pin user pages in memory
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @write:	whether pages will be written to
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long.
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno.
   *
   * get_user_pages_fast provides equivalent functionality to get_user_pages,
   * operating on current and current->mm, with force=0 and vma=NULL. However
   * unlike get_user_pages, it must be called without mmap_sem held.
   *
   * get_user_pages_fast may take mmap_sem and page table locks, so no
   * assumptions can be made about lack of locking. get_user_pages_fast is to be
   * implemented in a way that is advantageous (vs get_user_pages()) when the
   * user memory area is already faulted in and present in ptes. However if the
   * pages have to be faulted in, it may turn out to be slightly slower so
   * callers need to carefully consider what to use. On many architectures,
   * get_user_pages_fast simply falls back to get_user_pages.
   */
int __weak get_user_pages_fast(unsigned long start,
  				int nr_pages, int write, struct page **pages)
  {
  	return get_user_pages_unlocked(start, nr_pages, pages,
  				       write ? FOLL_WRITE : 0);
  }
  EXPORT_SYMBOL_GPL(get_user_pages_fast);
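
/*
 * Usage sketch (illustrative only, not part of mm/util.c;
 * my_pin_user_buf() is hypothetical): pin a small user buffer for read
 * access and drop the page references afterwards. Partial pins
 * (ret < nr_pages) must be handled by the caller.
 *
 *	static int my_pin_user_buf(unsigned long uaddr, struct page **pages,
 *				   int nr_pages)
 *	{
 *		int i, ret;
 *
 *		ret = get_user_pages_fast(uaddr, nr_pages, 0, pages);
 *		if (ret <= 0)
 *			return ret ? ret : -EFAULT;
 *		// ... access the ret pinned pages ...
 *		for (i = 0; i < ret; i++)
 *			put_page(pages[i]);
 *		return 0;
 *	}
 */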

  unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
  {
  	unsigned long ret;
  	struct mm_struct *mm = current->mm;
  	unsigned long populate;
  	LIST_HEAD(uf);
  
  	ret = security_mmap_file(file, prot, flag);
  	if (!ret) {
  		if (down_write_killable(&mm->mmap_sem))
  			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
  				    &populate, &uf);
  		up_write(&mm->mmap_sem);
  		userfaultfd_unmap_complete(mm, &uf);
  		if (populate)
  			mm_populate(ret, populate);
  	}
  	return ret;
  }
  
  unsigned long vm_mmap(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long offset)
  {
  	if (unlikely(offset + PAGE_ALIGN(len) < offset))
  		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
  		return -EINVAL;
  	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
  }
  EXPORT_SYMBOL(vm_mmap);
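
/*
 * Usage sketch (illustrative only, not part of mm/util.c; struct my_ctx
 * and my_map_buf() are hypothetical): errors from vm_mmap() are encoded
 * in the returned address and tested with IS_ERR_VALUE().
 *
 *	static int my_map_buf(struct my_ctx *ctx, struct file *filp,
 *			      unsigned long size)
 *	{
 *		unsigned long addr;
 *
 *		addr = vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, 0);
 *		if (IS_ERR_VALUE(addr))
 *			return (int)addr;	// negative errno
 *		ctx->uaddr = addr;
 *		return 0;
 *	}
 */
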
  /**
   * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
   * failure, fall back to non-contiguous (vmalloc) allocation.
   * @size: size of the request.
   * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
   * @node: numa node to allocate from
   *
   * Uses kmalloc to get the memory but if the allocation fails then falls back
   * to the vmalloc allocator. Use kvfree for freeing the memory.
   *
   * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
   * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
   * preferable to the vmalloc fallback, due to visible performance drawbacks.
   *
 * Any use of gfp flags outside of GFP_KERNEL should be discussed with the mm maintainers.
   */
  void *kvmalloc_node(size_t size, gfp_t flags, int node)
  {
  	gfp_t kmalloc_flags = flags;
  	void *ret;
  
  	/*
  	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
  	 * so the given set of flags has to be compatible.
  	 */
  	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
  
  	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc fallback.
	 * However, make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings, as we have a fallback.
  	 */
  	if (size > PAGE_SIZE) {
  		kmalloc_flags |= __GFP_NOWARN;
  		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
  			kmalloc_flags |= __GFP_NORETRY;
  	}
  
  	ret = kmalloc_node(size, kmalloc_flags, node);
  
  	/*
  	 * It doesn't really make sense to fallback to vmalloc for sub page
  	 * requests
  	 */
  	if (ret || size <= PAGE_SIZE)
  		return ret;
  	return __vmalloc_node_flags_caller(size, node, flags,
  			__builtin_return_address(0));
  }
  EXPORT_SYMBOL(kvmalloc_node);
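
/*
 * Usage sketch (illustrative only, not part of mm/util.c; my_alloc_table()
 * and my_free_table() are hypothetical): a possibly large allocation that
 * may fall back to vmalloc; the result is freed with kvfree() either way.
 *
 *	static u64 *my_alloc_table(size_t nr_entries)
 *	{
 *		return kvmalloc_node(nr_entries * sizeof(u64), GFP_KERNEL,
 *				     NUMA_NO_NODE);
 *	}
 *
 *	static void my_free_table(u64 *table)
 *	{
 *		kvfree(table);
 *	}
 */
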
  void kvfree(const void *addr)
  {
  	if (is_vmalloc_addr(addr))
  		vfree(addr);
  	else
  		kfree(addr);
  }
  EXPORT_SYMBOL(kvfree);
  static inline void *__page_rmapping(struct page *page)
  {
  	unsigned long mapping;
  
  	mapping = (unsigned long)page->mapping;
  	mapping &= ~PAGE_MAPPING_FLAGS;
  
  	return (void *)mapping;
  }
  
  /* Neutral page->mapping pointer to address_space or anon_vma or other */
  void *page_rmapping(struct page *page)
  {
  	page = compound_head(page);
  	return __page_rmapping(page);
  }
  /*
   * Return true if this page is mapped into pagetables.
   * For compound page it returns true if any subpage of compound page is mapped.
   */
  bool page_mapped(struct page *page)
  {
  	int i;
  
  	if (likely(!PageCompound(page)))
  		return atomic_read(&page->_mapcount) >= 0;
  	page = compound_head(page);
  	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
  		return true;
  	if (PageHuge(page))
  		return false;
  	for (i = 0; i < (1 << compound_order(page)); i++) {
  		if (atomic_read(&page[i]._mapcount) >= 0)
  			return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL(page_mapped);
  struct anon_vma *page_anon_vma(struct page *page)
  {
  	unsigned long mapping;
  
  	page = compound_head(page);
  	mapping = (unsigned long)page->mapping;
  	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
  		return NULL;
  	return __page_rmapping(page);
  }
  struct address_space *page_mapping(struct page *page)
  {
  	struct address_space *mapping;
  
  	page = compound_head(page);

  	/* This happens if someone calls flush_dcache_page on slab page */
  	if (unlikely(PageSlab(page)))
  		return NULL;
  	if (unlikely(PageSwapCache(page))) {
  		swp_entry_t entry;
  
  		entry.val = page_private(page);
  		return swap_address_space(entry);
  	}
	mapping = page->mapping;
  	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
  		return NULL;
  
  	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
  }
  EXPORT_SYMBOL(page_mapping);

  /* Slow path of page_mapcount() for compound pages */
  int __page_mapcount(struct page *page)
  {
  	int ret;
  
  	ret = atomic_read(&page->_mapcount) + 1;
  	/*
  	 * For file THP page->_mapcount contains total number of mapping
  	 * of the page: no need to look into compound_mapcount.
  	 */
  	if (!PageAnon(page) && !PageHuge(page))
  		return ret;
  	page = compound_head(page);
  	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
  	if (PageDoubleMap(page))
  		ret--;
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__page_mapcount);
  int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
  int sysctl_overcommit_ratio __read_mostly = 50;
  unsigned long sysctl_overcommit_kbytes __read_mostly;
  int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
  unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
  unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
  int overcommit_ratio_handler(struct ctl_table *table, int write,
  			     void __user *buffer, size_t *lenp,
  			     loff_t *ppos)
  {
  	int ret;
  
  	ret = proc_dointvec(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		sysctl_overcommit_kbytes = 0;
  	return ret;
  }
  
  int overcommit_kbytes_handler(struct ctl_table *table, int write,
  			     void __user *buffer, size_t *lenp,
  			     loff_t *ppos)
  {
  	int ret;
  
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		sysctl_overcommit_ratio = 0;
  	return ret;
  }
  /*
   * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
   */
  unsigned long vm_commit_limit(void)
  {
  	unsigned long allowed;
  
  	if (sysctl_overcommit_kbytes)
  		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
  	else
  		allowed = ((totalram_pages - hugetlb_total_pages())
  			   * sysctl_overcommit_ratio / 100);
  	allowed += total_swap_pages;
  
  	return allowed;
  }
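
/*
 * Worked example (illustrative only): with the default overcommit ratio of
 * 50, 8 GiB of non-hugetlb RAM and 2 GiB of swap, OVERCOMMIT_NEVER allows
 * roughly 8 GiB * 50% + 2 GiB = 6 GiB of committed address space. Setting
 * sysctl_overcommit_kbytes replaces the ratio-based term with a fixed
 * amount.
 */
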
  /*
 * Make sure vm_committed_as sits in its own cacheline and is not shared
 * with other variables, since it can be updated frequently by several CPUs.
   */
  struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
  
  /*
   * The global memory commitment made in the system can be a metric
   * that can be used to drive ballooning decisions when Linux is hosted
   * as a guest. On Hyper-V, the host implements a policy engine for dynamically
   * balancing memory across competing virtual machines that are hosted.
   * Several metrics drive this policy engine including the guest reported
   * memory commitment.
   */
  unsigned long vm_memory_committed(void)
  {
  	return percpu_counter_read_positive(&vm_committed_as);
  }
  EXPORT_SYMBOL_GPL(vm_memory_committed);
  
  /*
   * Check that a process has enough memory to allocate a new virtual
   * mapping. 0 means there is enough memory for the allocation to
   * succeed and -ENOMEM implies there is not.
   *
   * We currently support three overcommit policies, which are set via the
   * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
   *
   * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
   * Additional code 2002 Jul 20 by Robert Love.
   *
   * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
   *
   * Note this is a helper function intended to be used by LSMs which
   * wish to use this logic.
   */
  int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
  {
  	long free, allowed, reserve;
  
  	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
  			-(s64)vm_committed_as_batch * num_online_cpus(),
  			"memory commitment underflow");
  
  	vm_acct_memory(pages);
  
  	/*
  	 * Sometimes we want to use more memory than we have
  	 */
  	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
  		return 0;
  
  	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
  		free = global_zone_page_state(NR_FREE_PAGES);
  		free += global_node_page_state(NR_FILE_PAGES);
  
  		/*
  		 * shmem pages shouldn't be counted as free in this
  		 * case, they can't be purged, only swapped out, and
  		 * that won't affect the overall amount of available
  		 * memory in the system.
  		 */
  		free -= global_node_page_state(NR_SHMEM);
  
  		free += get_nr_swap_pages();
  
  		/*
  		 * Any slabs which are created with the
  		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
  		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this category.
  		 */
  		free += global_node_page_state(NR_SLAB_RECLAIMABLE);
  
  		/*
  		 * Part of the kernel memory, which can be released
  		 * under memory pressure.
  		 */
  		free += global_node_page_state(
  			NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
  
  		/*
  		 * Leave reserved pages. The pages are not for anonymous pages.
  		 */
  		if (free <= totalreserve_pages)
  			goto error;
  		else
  			free -= totalreserve_pages;
  
  		/*
  		 * Reserve some for root
  		 */
  		if (!cap_sys_admin)
  			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
  
  		if (free > pages)
  			return 0;
  
  		goto error;
  	}
  
  	allowed = vm_commit_limit();
  	/*
  	 * Reserve some for root
  	 */
  	if (!cap_sys_admin)
  		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
  
  	/*
  	 * Don't let a single process grow so big a user can't recover
  	 */
  	if (mm) {
  		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
  		allowed -= min_t(long, mm->total_vm / 32, reserve);
  	}
  
  	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
  		return 0;
  error:
  	vm_unacct_memory(pages);
  
  	return -ENOMEM;
  }
  /**
   * get_cmdline() - copy the cmdline value to a buffer.
   * @task:     the task whose cmdline value to copy.
   * @buffer:   the buffer to copy to.
   * @buflen:   the length of the buffer. Larger cmdline values are truncated
   *            to this length.
   * Returns the size of the cmdline field copied. Note that the copy does
   * not guarantee an ending NULL byte.
   */
  int get_cmdline(struct task_struct *task, char *buffer, int buflen)
  {
  	int res = 0;
  	unsigned int len;
  	struct mm_struct *mm = get_task_mm(task);
  	unsigned long arg_start, arg_end, env_start, env_end;
  	if (!mm)
  		goto out;
  	if (!mm->arg_end)
  		goto out_mm;	/* Shh! No looking before we're done */
  	down_read(&mm->mmap_sem);
  	arg_start = mm->arg_start;
  	arg_end = mm->arg_end;
  	env_start = mm->env_start;
  	env_end = mm->env_end;
  	up_read(&mm->mmap_sem);
  
  	len = arg_end - arg_start;
  
  	if (len > buflen)
  		len = buflen;
  	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
  
  	/*
  	 * If the nul at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
  	 */
  	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
  		len = strnlen(buffer, res);
  		if (len < res) {
  			res = len;
  		} else {
  			len = env_end - env_start;
  			if (len > buflen - res)
  				len = buflen - res;
  			res += access_process_vm(task, env_start,
  						 buffer+res, len,
  						 FOLL_FORCE);
  			res = strnlen(buffer, res);
  		}
  	}
  out_mm:
  	mmput(mm);
  out:
  	return res;
  }