mm/util.c

  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/export.h>
  #include <linux/err.h>
  #include <linux/sched.h>
  #include <asm/uaccess.h>

  #include "internal.h"
  #define CREATE_TRACE_POINTS
  #include <trace/events/kmem.h>

  /**
   * kstrdup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrdup(const char *s, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strlen(s) + 1;
  	buf = kmalloc_track_caller(len, gfp);
  	if (buf)
  		memcpy(buf, s, len);
  	return buf;
  }
  EXPORT_SYMBOL(kstrdup);
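
  /*
   * Example (illustrative sketch, not part of the original file): the usual
   * kstrdup() pattern in a hypothetical caller -- check for NULL and
   * kfree() the copy once it is no longer needed.
   *
   *	char *label = kstrdup(name, GFP_KERNEL);
   *	if (!label)
   *		return -ENOMEM;
   *	...
   *	kfree(label);
   */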

  /**
   * kstrndup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @max: read at most @max chars from @s
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrndup(const char *s, size_t max, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strnlen(s, max);
  	buf = kmalloc_track_caller(len+1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kstrndup);
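
  /*
   * Example (illustrative sketch, not part of the original file): kstrndup()
   * is handy when the source is not guaranteed to be NUL-terminated; here a
   * hypothetical fixed-size character array @field becomes a proper C string.
   *
   *	char *name = kstrndup(field, sizeof(field), GFP_KERNEL);
   *	if (!name)
   *		return -ENOMEM;
   */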
  
  /**
   * kmemdup - duplicate region of memory
   *
   * @src: memory region to duplicate
   * @len: memory region length
   * @gfp: GFP mask to use
   */
  void *kmemdup(const void *src, size_t len, gfp_t gfp)
  {
  	void *p;
  	p = kmalloc_track_caller(len, gfp);
  	if (p)
  		memcpy(p, src, len);
  	return p;
  }
  EXPORT_SYMBOL(kmemdup);
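
  /*
   * Example (illustrative sketch, not part of the original file): duplicating
   * a hypothetical template structure @tmpl; the copy is independent of the
   * original and must be released with kfree().
   *
   *	struct foo *copy = kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
   *	if (!copy)
   *		return -ENOMEM;
   */
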
  /**
   * memdup_user - duplicate memory region from user space
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Returns an ERR_PTR() on failure.
   */
  void *memdup_user(const void __user *src, size_t len)
  {
  	void *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause a page fault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user);
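
  /*
   * Example (illustrative sketch, not part of the original file): a
   * hypothetical ioctl handler copying a fixed-size argument block @uarg
   * from user space; note the ERR_PTR()/PTR_ERR() convention on failure.
   *
   *	struct foo_args *args = memdup_user(uarg, sizeof(*args));
   *	if (IS_ERR(args))
   *		return PTR_ERR(args);
   *	...
   *	kfree(args);
   */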
  
  /**
   * __krealloc - like krealloc() but don't free @p.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * This function is like krealloc() except it never frees the originally
   * allocated buffer. Use this if you don't want to free the buffer immediately
   * like, for example, with RCU.
   */
  void *__krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  	size_t ks = 0;

  	if (unlikely(!new_size))
  		return ZERO_SIZE_PTR;

  	if (p)
  		ks = ksize(p);
  	if (ks >= new_size)
  		return (void *)p;
  
  	ret = kmalloc_track_caller(new_size, flags);
  	if (ret && p)
  		memcpy(ret, p, ks);
  
  	return ret;
  }
  EXPORT_SYMBOL(__krealloc);
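
  /*
   * Example (illustrative sketch, not part of the original file): growing an
   * RCU-protected buffer published through a hypothetical pointer @foo_data.
   * Because __krealloc() never frees the old buffer, readers still holding
   * the old pointer remain safe until a grace period has elapsed.
   *
   *	new = __krealloc(old, new_size, GFP_KERNEL);
   *	if (!new)
   *		return -ENOMEM;
   *	if (new != old) {
   *		rcu_assign_pointer(foo_data, new);
   *		synchronize_rcu();
   *		kfree(old);
   *	}
   */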
  
  /**
   * krealloc - reallocate memory. The contents will remain unchanged.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * The contents of the object pointed to are preserved up to the
   * lesser of the new and old sizes.  If @p is %NULL, krealloc()
   * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
   * %NULL pointer, the object pointed to is freed.
   */
  void *krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  
  	if (unlikely(!new_size)) {
  		kfree(p);
  		return ZERO_SIZE_PTR;
  	}
  
  	ret = __krealloc(p, new_size, flags);
  	if (ret && p != ret)
  		kfree(p);
  	return ret;
  }
  EXPORT_SYMBOL(krealloc);
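
  /*
   * Example (illustrative sketch, not part of the original file): the usual
   * krealloc() pattern -- assign to a temporary first so the original buffer
   * is not lost if the allocation fails.
   *
   *	new = krealloc(buf, new_len, GFP_KERNEL);
   *	if (!new) {
   *		kfree(buf);
   *		return -ENOMEM;
   *	}
   *	buf = new;
   */
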
  /**
   * kzfree - like kfree but zero memory
   * @p: object to free memory of
   *
   * The memory of the object @p points to is zeroed before it is freed.
   * If @p is %NULL, kzfree() does nothing.
   *
   * Note: this function zeroes the whole allocated buffer which can be a good
   * deal bigger than the requested buffer size passed to kmalloc(). So be
   * careful when using this function in performance sensitive code.
   */
  void kzfree(const void *p)
  {
  	size_t ks;
  	void *mem = (void *)p;
  
  	if (unlikely(ZERO_OR_NULL_PTR(mem)))
  		return;
  	ks = ksize(mem);
  	memset(mem, 0, ks);
  	kfree(mem);
  }
  EXPORT_SYMBOL(kzfree);
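
  /*
   * Example (illustrative sketch, not part of the original file): releasing a
   * hypothetical buffer that held key material; kzfree() clears the whole
   * allocation before handing it back to the allocator.
   *
   *	key = kmalloc(key_len, GFP_KERNEL);
   *	...
   *	kzfree(key);
   */
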
  /**
   * strndup_user - duplicate an existing string from user space
   * @s: The string to duplicate
   * @n: Maximum number of bytes to copy, including the trailing NUL.
   */
  char *strndup_user(const char __user *s, long n)
  {
  	char *p;
  	long length;
  
  	length = strnlen_user(s, n);
  
  	if (!length)
  		return ERR_PTR(-EFAULT);
  
  	if (length > n)
  		return ERR_PTR(-EINVAL);
  	p = memdup_user(s, length);

  	if (IS_ERR(p))
  		return p;
  
  	p[length - 1] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(strndup_user);
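
  /*
   * Example (illustrative sketch, not part of the original file): a
   * hypothetical syscall or ioctl path copying a user-supplied name of at
   * most FOO_NAME_MAX bytes (a made-up limit), including the trailing NUL.
   *
   *	char *name = strndup_user(uname, FOO_NAME_MAX);
   *	if (IS_ERR(name))
   *		return PTR_ERR(name);
   *	...
   *	kfree(name);
   */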

  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev, struct rb_node *rb_parent)
  {
  	struct vm_area_struct *next;
  
  	vma->vm_prev = prev;
  	if (prev) {
  		next = prev->vm_next;
  		prev->vm_next = vma;
  	} else {
  		mm->mmap = vma;
  		if (rb_parent)
  			next = rb_entry(rb_parent,
  					struct vm_area_struct, vm_rb);
  		else
  			next = NULL;
  	}
  	vma->vm_next = next;
  	if (next)
  		next->vm_prev = vma;
  }
  #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
  void arch_pick_mmap_layout(struct mm_struct *mm)
  {
  	mm->mmap_base = TASK_UNMAPPED_BASE;
  	mm->get_unmapped_area = arch_get_unmapped_area;
  	mm->unmap_area = arch_unmap_area;
  }
  #endif

  /*
   * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
   * back to the regular GUP.
   * If the architecture does not support this function, it simply returns
   * with no pages pinned.
   */
  int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
  				 int nr_pages, int write, struct page **pages)
  {
  	return 0;
  }
  EXPORT_SYMBOL_GPL(__get_user_pages_fast);
  /**
   * get_user_pages_fast() - pin user pages in memory
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @write:	whether pages will be written to
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long.
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno.
   *
   * get_user_pages_fast provides equivalent functionality to get_user_pages,
   * operating on current and current->mm, with force=0 and vma=NULL. However
   * unlike get_user_pages, it must be called without mmap_sem held.
   *
   * get_user_pages_fast may take mmap_sem and page table locks, so no
   * assumptions can be made about lack of locking. get_user_pages_fast is to be
   * implemented in a way that is advantageous (vs get_user_pages()) when the
   * user memory area is already faulted in and present in ptes. However if the
   * pages have to be faulted in, it may turn out to be slightly slower so
   * callers need to carefully consider what to use. On many architectures,
   * get_user_pages_fast simply falls back to get_user_pages.
   */
  int __attribute__((weak)) get_user_pages_fast(unsigned long start,
  				int nr_pages, int write, struct page **pages)
  {
  	struct mm_struct *mm = current->mm;
  	int ret;
  
  	down_read(&mm->mmap_sem);
  	ret = get_user_pages(current, mm, start, nr_pages,
  					write, 0, pages, NULL);
  	up_read(&mm->mmap_sem);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(get_user_pages_fast);
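
  /*
   * Example (illustrative sketch, not part of the original file): pinning a
   * hypothetical user buffer for later kernel access. Every page actually
   * pinned must eventually be released with put_page().
   *
   *	npages = get_user_pages_fast(start, nr_pages, 1, pages);
   *	if (npages <= 0)
   *		return npages ? npages : -EFAULT;
   *	...
   *	for (i = 0; i < npages; i++)
   *		put_page(pages[i]);
   */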
  
  /* Tracepoints definitions. */
  EXPORT_TRACEPOINT_SYMBOL(kmalloc);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
  EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kfree);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);