mm/util.c

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

#include "internal.h"

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
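
/*
 * Example (illustrative; names are hypothetical): duplicating a string with
 * kstrdup() and propagating allocation failure to the caller.
 *
 *	dev->name = kstrdup(template_name, GFP_KERNEL);
 *	if (!dev->name)
 *		return -ENOMEM;
 */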

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
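
/*
 * Example (illustrative; names are hypothetical): bounding the copy of a
 * possibly unterminated source buffer; the result is always NUL-terminated.
 *
 *	label = kstrndup(raw_buf, LABEL_MAX, GFP_KERNEL);
 *	if (!label)
 *		return -ENOMEM;
 */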
  
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
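
/*
 * Example (illustrative; names are hypothetical): taking a private copy of
 * a caller-supplied configuration blob.
 *
 *	priv->cfg = kmemdup(cfg, cfg_len, GFP_KERNEL);
 *	if (!priv->cfg)
 *		return -ENOMEM;
 */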

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
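
/*
 * Example (illustrative; names are hypothetical): copying an ioctl argument
 * buffer in from user space; note the ERR_PTR()-encoded return value.
 *
 *	kbuf = memdup_user(uarg, arg_len);
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 */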

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
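
/*
 * Example (illustrative; names are hypothetical): duplicating a
 * user-supplied path, limited to PATH_MAX bytes including the NUL.
 *
 *	path = strndup_user(upath, PATH_MAX);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 */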
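
/*
 * Link @vma into @mm's doubly-linked list of VMAs after @prev. When @vma
 * becomes the first mapping, the next list entry is derived from
 * @rb_parent in the VMA rbtree.
 */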
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is true, check in the entire thread group or else
 * just check in the current task. Returns the task_struct of the task
 * that the vma is stack for. Must be called under rcu_read_lock().
 */
struct task_struct *task_of_stack(struct task_struct *task,
				struct vm_area_struct *vma, bool in_group)
{
	if (vm_is_stack_for_task(task, vma))
		return task;

	if (in_group) {
		struct task_struct *t;

		for_each_thread(task, t) {
			if (vm_is_stack_for_task(t, vma))
				return t;
		}
	}
	return NULL;
}
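
/*
 * Example (illustrative): a /proc-style caller checking whether @vma is a
 * thread's stack, under the required RCU read lock.
 *
 *	rcu_read_lock();
 *	owner = task_of_stack(task, vma, true);
 *	rcu_read_unlock();
 */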

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
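
/*
 * Example (illustrative; NR_PIN is hypothetical): pinning a user buffer for
 * reading and dropping the page references when done.
 *
 *	struct page *pages[NR_PIN];
 *	int i, pinned;
 *
 *	pinned = get_user_pages_fast(uaddr, NR_PIN, 0, pages);
 *	if (pinned < 0)
 *		return pinned;
 *
 *	(use the pinned pages, then release them:)
 *
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 */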

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
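
/*
 * Example (illustrative): mapping the first page of @file read-only at a
 * kernel-chosen address; errors come back encoded in the return value.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, PAGE_SIZE, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */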
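
/*
 * kvfree - free memory that may have been allocated with either vmalloc()
 * or kmalloc(); the appropriate deallocator is picked from the address.
 */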
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
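
/*
 * Example (illustrative): kvfree() pairs naturally with a kmalloc()
 * allocation that falls back to vmalloc() for large sizes.
 *
 *	buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *	if (!buf)
 *		buf = vmalloc(size);
 *	...
 *	kvfree(buf);
 */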

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}
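
/*
 * vm.overcommit_ratio and vm.overcommit_kbytes are mutually exclusive:
 * writing one of the two sysctls clears the other.
 */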
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
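
/*
 * Worked example (illustrative): with sysctl_overcommit_kbytes unset,
 * sysctl_overcommit_ratio == 50, 4 GiB of RAM, no hugetlb pages and
 * 2 GiB of swap, the limit is 4 GiB * 50% + 2 GiB = 4 GiB of commit charge.
 */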

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume the application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
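
/*
 * Example (illustrative): fetching another task's command line; remember
 * that the result is not guaranteed to be NUL-terminated.
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf));
 */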