mm/util.c
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/export.h>
  #include <linux/err.h>
  #include <linux/sched.h>
  #include <linux/security.h>
  #include <asm/uaccess.h>

  #include "internal.h"
  #define CREATE_TRACE_POINTS
  #include <trace/events/kmem.h>

  /**
   * kstrdup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrdup(const char *s, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strlen(s) + 1;
  	buf = kmalloc_track_caller(len, gfp);
  	if (buf)
  		memcpy(buf, s, len);
  	return buf;
  }
  EXPORT_SYMBOL(kstrdup);
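
  /*
   * Illustrative sketch (not part of the original file): typical kstrdup()
   * use from sleeping context; "name" and "label" are hypothetical.
   *
   *	char *label;
   *
   *	label = kstrdup(name, GFP_KERNEL);
   *	if (!label)
   *		return -ENOMEM;
   *	...
   *	kfree(label);
   */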

  /**
   * kstrndup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @max: read at most @max chars from @s
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrndup(const char *s, size_t max, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strnlen(s, max);
  	buf = kmalloc_track_caller(len+1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kstrndup);
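
  /*
   * Illustrative sketch (not part of the original file): kstrndup() suits
   * fixed-size source fields that may lack a trailing NUL; "field" and
   * FIELD_LEN are hypothetical.
   *
   *	char *name;
   *
   *	name = kstrndup(field, FIELD_LEN, GFP_KERNEL);
   *	if (!name)
   *		return -ENOMEM;
   */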
  
  /**
   * kmemdup - duplicate region of memory
   *
   * @src: memory region to duplicate
   * @len: memory region length
   * @gfp: GFP mask to use
   */
  void *kmemdup(const void *src, size_t len, gfp_t gfp)
  {
  	void *p;
  	p = kmalloc_track_caller(len, gfp);
  	if (p)
  		memcpy(p, src, len);
  	return p;
  }
  EXPORT_SYMBOL(kmemdup);
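
  /*
   * Illustrative sketch (not part of the original file): duplicating a small
   * structure with kmemdup(); "struct foo" and "defaults" are hypothetical.
   *
   *	struct foo *copy;
   *
   *	copy = kmemdup(defaults, sizeof(*defaults), GFP_KERNEL);
   *	if (!copy)
   *		return -ENOMEM;
   */
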
  /**
   * memdup_user - duplicate memory region from user space
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Returns an ERR_PTR() on failure.
   */
  void *memdup_user(const void __user *src, size_t len)
  {
  	void *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause pagefault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user);
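
  /*
   * Illustrative sketch (not part of the original file): note that failure is
   * reported via ERR_PTR(), not NULL; "struct foo_args" and "uarg" are
   * hypothetical.
   *
   *	struct foo_args *args;
   *
   *	args = memdup_user(uarg, sizeof(*args));
   *	if (IS_ERR(args))
   *		return PTR_ERR(args);
   *	...
   *	kfree(args);
   */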
  
  /**
   * __krealloc - like krealloc() but don't free @p.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * This function is like krealloc() except it never frees the originally
   * allocated buffer. Use this if you don't want to free the buffer
   * immediately, as is the case, for example, with RCU.
   */
  void *__krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  	size_t ks = 0;

  	if (unlikely(!new_size))
  		return ZERO_SIZE_PTR;

  	if (p)
  		ks = ksize(p);
  	if (ks >= new_size)
  		return (void *)p;
  
  	ret = kmalloc_track_caller(new_size, flags);
  	if (ret && p)
  		memcpy(ret, p, ks);
  
  	return ret;
  }
  EXPORT_SYMBOL(__krealloc);
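
  /*
   * Illustrative sketch (not part of the original file) of the RCU-style use
   * mentioned above: the old buffer must stay visible to readers until a
   * grace period has elapsed, so it is freed only afterwards. "obj" and
   * "obj->buf" are hypothetical.
   *
   *	new = __krealloc(old, new_size, GFP_KERNEL);
   *	if (!new)
   *		return -ENOMEM;
   *	if (new != old) {
   *		rcu_assign_pointer(obj->buf, new);
   *		synchronize_rcu();
   *		kfree(old);
   *	}
   */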
  
  /**
   * krealloc - reallocate memory. The contents will remain unchanged.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * The contents of the object pointed to are preserved up to the
   * lesser of the new and old sizes.  If @p is %NULL, krealloc()
   * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
   * %NULL pointer, the object pointed to is freed.
   */
  void *krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  
  	if (unlikely(!new_size)) {
  		kfree(p);
  		return ZERO_SIZE_PTR;
  	}
  
  	ret = __krealloc(p, new_size, flags);
  	if (ret && p != ret)
  		kfree(p);
  	return ret;
  }
  EXPORT_SYMBOL(krealloc);
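
  /*
   * Illustrative sketch (not part of the original file): on failure krealloc()
   * returns NULL and leaves the original allocation untouched, so the old
   * pointer must not be overwritten before the check; "buf" and "new_len" are
   * hypothetical.
   *
   *	char *tmp;
   *
   *	tmp = krealloc(buf, new_len, GFP_KERNEL);
   *	if (!tmp)
   *		return -ENOMEM;
   *	buf = tmp;
   */
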
  /**
   * kzfree - like kfree but zero memory
   * @p: object to free memory of
   *
   * The memory of the object @p points to is zeroed before it is freed.
   * If @p is %NULL, kzfree() does nothing.
   *
   * Note: this function zeroes the whole allocated buffer which can be a good
   * deal bigger than the requested buffer size passed to kmalloc(). So be
   * careful when using this function in performance sensitive code.
   */
  void kzfree(const void *p)
  {
  	size_t ks;
  	void *mem = (void *)p;
  
  	if (unlikely(ZERO_OR_NULL_PTR(mem)))
  		return;
  	ks = ksize(mem);
  	memset(mem, 0, ks);
  	kfree(mem);
  }
  EXPORT_SYMBOL(kzfree);
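
  /*
   * Illustrative sketch (not part of the original file): kzfree() is meant for
   * buffers holding sensitive data such as key material; "key_len" is
   * hypothetical.
   *
   *	char *key;
   *
   *	key = kmalloc(key_len, GFP_KERNEL);
   *	if (!key)
   *		return -ENOMEM;
   *	...
   *	kzfree(key);
   */
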
  /*
   * strndup_user - duplicate an existing string from user space
   * @s: The string to duplicate
   * @n: Maximum number of bytes to copy, including the trailing NUL.
   */
  char *strndup_user(const char __user *s, long n)
  {
  	char *p;
  	long length;
  
  	length = strnlen_user(s, n);
  
  	if (!length)
  		return ERR_PTR(-EFAULT);
  
  	if (length > n)
  		return ERR_PTR(-EINVAL);
  	p = memdup_user(s, length);

  	if (IS_ERR(p))
  		return p;
  
  	p[length - 1] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(strndup_user);
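
  /*
   * Illustrative sketch (not part of the original file): copying a
   * user-supplied name in an ioctl-style handler; "uname" is hypothetical and
   * failure comes back as an ERR_PTR().
   *
   *	char *name;
   *
   *	name = strndup_user(uname, PATH_MAX);
   *	if (IS_ERR(name))
   *		return PTR_ERR(name);
   *	...
   *	kfree(name);
   */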

  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent)
  {
  	struct vm_area_struct *next;
  
  	vma->vm_prev = prev;
  	if (prev) {
  		next = prev->vm_next;
  		prev->vm_next = vma;
  	} else {
  		mm->mmap = vma;
  		if (rb_parent)
  			next = rb_entry(rb_parent,
  					struct vm_area_struct, vm_rb);
  		else
  			next = NULL;
  	}
  	vma->vm_next = next;
  	if (next)
  		next->vm_prev = vma;
  }
  /* Check if the vma is being used as a stack by this task */
  static int vm_is_stack_for_task(struct task_struct *t,
  				struct vm_area_struct *vma)
  {
  	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
  }
  
  /*
   * Check if the vma is being used as a stack.
   * If in_group is non-zero, check in the entire thread group or else
   * just check in the current task. Returns the pid of the task that
   * the vma is stack for.
   */
  pid_t vm_is_stack(struct task_struct *task,
  		  struct vm_area_struct *vma, int in_group)
  {
  	pid_t ret = 0;
  
  	if (vm_is_stack_for_task(task, vma))
  		return task->pid;
  
  	if (in_group) {
  		struct task_struct *t;
  		rcu_read_lock();
  		if (!pid_alive(task))
  			goto done;
  
  		t = task;
  		do {
  			if (vm_is_stack_for_task(t, vma)) {
  				ret = t->pid;
  				goto done;
  			}
  		} while_each_thread(task, t);
  done:
  		rcu_read_unlock();
  	}
  
  	return ret;
  }
  #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
  void arch_pick_mmap_layout(struct mm_struct *mm)
  {
  	mm->mmap_base = TASK_UNMAPPED_BASE;
  	mm->get_unmapped_area = arch_get_unmapped_area;
  	mm->unmap_area = arch_unmap_area;
  }
  #endif

  /*
   * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
   * back to the regular GUP.
   * If the architecture does not support this function, it simply returns
   * with no pages pinned.
   */
  int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
  				 int nr_pages, int write, struct page **pages)
  {
  	return 0;
  }
  EXPORT_SYMBOL_GPL(__get_user_pages_fast);
  /**
   * get_user_pages_fast() - pin user pages in memory
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @write:	whether pages will be written to
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long.
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno.
   *
   * get_user_pages_fast provides equivalent functionality to get_user_pages,
   * operating on current and current->mm, with force=0 and vma=NULL. However
   * unlike get_user_pages, it must be called without mmap_sem held.
   *
   * get_user_pages_fast may take mmap_sem and page table locks, so no
   * assumptions can be made about lack of locking. get_user_pages_fast is to be
   * implemented in a way that is advantageous (vs get_user_pages()) when the
   * user memory area is already faulted in and present in ptes. However if the
   * pages have to be faulted in, it may turn out to be slightly slower so
   * callers need to carefully consider what to use. On many architectures,
   * get_user_pages_fast simply falls back to get_user_pages.
   */
  int __attribute__((weak)) get_user_pages_fast(unsigned long start,
  				int nr_pages, int write, struct page **pages)
  {
  	struct mm_struct *mm = current->mm;
  	int ret;
  
  	down_read(&mm->mmap_sem);
  	ret = get_user_pages(current, mm, start, nr_pages,
  					write, 0, pages, NULL);
  	up_read(&mm->mmap_sem);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(get_user_pages_fast);
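
  /*
   * Illustrative sketch (not part of the original file): pinning a few user
   * pages for writing and releasing them with put_page() afterwards. The
   * return value may be smaller than the number requested, so only the pages
   * actually pinned are released; "uaddr", "nr" and "pages" are hypothetical.
   *
   *	int i, got;
   *
   *	got = get_user_pages_fast(uaddr, nr, 1, pages);
   *	if (got <= 0)
   *		return got ? got : -EFAULT;
   *	...
   *	for (i = 0; i < got; i++)
   *		put_page(pages[i]);
   */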

  unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long pgoff)
  {
  	unsigned long ret;
  	struct mm_struct *mm = current->mm;
  
  	ret = security_mmap_file(file, prot, flag);
  	if (!ret) {
  		down_write(&mm->mmap_sem);
  		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
  		up_write(&mm->mmap_sem);
  	}
  	return ret;
  }
  
  unsigned long vm_mmap(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long offset)
  {
  	if (unlikely(offset + PAGE_ALIGN(len) < offset))
  		return -EINVAL;
  	if (unlikely(offset & ~PAGE_MASK))
  		return -EINVAL;
  
  	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
  }
  EXPORT_SYMBOL(vm_mmap);
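
  /*
   * Illustrative sketch (not part of the original file): a driver mapping a
   * file into the current process; errors are encoded in the returned address
   * and checked with IS_ERR_VALUE(). "file" and "size" are hypothetical.
   *
   *	unsigned long uaddr;
   *
   *	uaddr = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE,
   *			MAP_SHARED, 0);
   *	if (IS_ERR_VALUE(uaddr))
   *		return (long)uaddr;
   */
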
  /* Tracepoints definitions. */
  EXPORT_TRACEPOINT_SYMBOL(kmalloc);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
  EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kfree);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);