Blame view

mm/util.c 6.53 KB
16d69265b   Andrew Morton   uninline arch_pic...
1
  #include <linux/mm.h>
30992c97a   Matt Mackall   [PATCH] slob: int...
2
3
4
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/module.h>
96840aa00   Davi Arnaut   [PATCH] strndup_u...
5
  #include <linux/err.h>
3b8f14b41   Adrian Bunk   mm/util.c must #i...
6
  #include <linux/sched.h>
96840aa00   Davi Arnaut   [PATCH] strndup_u...
7
  #include <asm/uaccess.h>
30992c97a   Matt Mackall   [PATCH] slob: int...
8

a8d154b00   Steven Rostedt   tracing: create a...
9
  #define CREATE_TRACE_POINTS
ad8d75fff   Steven Rostedt   tracing/events: m...
10
  #include <trace/events/kmem.h>
a8d154b00   Steven Rostedt   tracing: create a...
11

30992c97a   Matt Mackall   [PATCH] slob: int...
12
  /**
30992c97a   Matt Mackall   [PATCH] slob: int...
13
   * kstrdup - allocate space for and copy an existing string
30992c97a   Matt Mackall   [PATCH] slob: int...
14
15
16
17
18
19
20
21
22
23
24
25
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrdup(const char *s, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strlen(s) + 1;
1d2c8eea6   Christoph Hellwig   [PATCH] slab: cle...
26
  	buf = kmalloc_track_caller(len, gfp);
30992c97a   Matt Mackall   [PATCH] slob: int...
27
28
29
30
31
  	if (buf)
  		memcpy(buf, s, len);
  	return buf;
  }
  EXPORT_SYMBOL(kstrdup);
96840aa00   Davi Arnaut   [PATCH] strndup_u...
32

1a2f67b45   Alexey Dobriyan   [PATCH] kmemdup: ...
33
  /**
1e66df3ee   Jeremy Fitzhardinge   add kstrndup
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
   * kstrndup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @max: read at most @max chars from @s
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   */
  char *kstrndup(const char *s, size_t max, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strnlen(s, max);
  	buf = kmalloc_track_caller(len+1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kstrndup);
  
  /**
1a2f67b45   Alexey Dobriyan   [PATCH] kmemdup: ...
58
59
60
61
62
63
64
65
66
   * kmemdup - duplicate region of memory
   *
   * @src: memory region to duplicate
   * @len: memory region length
   * @gfp: GFP mask to use
   */
  void *kmemdup(const void *src, size_t len, gfp_t gfp)
  {
  	void *p;
1d2c8eea6   Christoph Hellwig   [PATCH] slab: cle...
67
  	p = kmalloc_track_caller(len, gfp);
1a2f67b45   Alexey Dobriyan   [PATCH] kmemdup: ...
68
69
70
71
72
  	if (p)
  		memcpy(p, src, len);
  	return p;
  }
  EXPORT_SYMBOL(kmemdup);
ef2ad80c7   Christoph Lameter   Slab allocators: ...
73
  /**
610a77e04   Li Zefan   memdup_user(): in...
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
   * memdup_user - duplicate memory region from user space
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Returns an ERR_PTR() on failure.
   */
  void *memdup_user(const void __user *src, size_t len)
  {
  	void *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause pagefault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user);
  
  /**
93bc4e89c   Pekka Enberg   netfilter: fix do...
104
   * __krealloc - like krealloc() but don't free @p.
ef2ad80c7   Christoph Lameter   Slab allocators: ...
105
106
107
108
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
93bc4e89c   Pekka Enberg   netfilter: fix do...
109
110
111
   * This function is like krealloc() except it never frees the originally
   * allocated buffer. Use this if you don't want to free the buffer immediately
   * like, for example, with RCU.
ef2ad80c7   Christoph Lameter   Slab allocators: ...
112
   */
93bc4e89c   Pekka Enberg   netfilter: fix do...
113
  void *__krealloc(const void *p, size_t new_size, gfp_t flags)
ef2ad80c7   Christoph Lameter   Slab allocators: ...
114
115
  {
  	void *ret;
ef8b4520b   Christoph Lameter   Slab allocators: ...
116
  	size_t ks = 0;
ef2ad80c7   Christoph Lameter   Slab allocators: ...
117

93bc4e89c   Pekka Enberg   netfilter: fix do...
118
  	if (unlikely(!new_size))
6cb8f9132   Christoph Lameter   Slab allocators: ...
119
  		return ZERO_SIZE_PTR;
ef2ad80c7   Christoph Lameter   Slab allocators: ...
120

ef8b4520b   Christoph Lameter   Slab allocators: ...
121
122
  	if (p)
  		ks = ksize(p);
ef2ad80c7   Christoph Lameter   Slab allocators: ...
123
124
125
126
  	if (ks >= new_size)
  		return (void *)p;
  
  	ret = kmalloc_track_caller(new_size, flags);
93bc4e89c   Pekka Enberg   netfilter: fix do...
127
  	if (ret && p)
be21f0ab0   Adrian Bunk   fix mm/util.c:kre...
128
  		memcpy(ret, p, ks);
93bc4e89c   Pekka Enberg   netfilter: fix do...
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
  
  	return ret;
  }
  EXPORT_SYMBOL(__krealloc);
  
  /**
   * krealloc - reallocate memory. The contents will remain unchanged.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * The contents of the object pointed to are preserved up to the
   * lesser of the new and old sizes.  If @p is %NULL, krealloc()
   * behaves exactly like kmalloc().  If @size is 0 and @p is not a
   * %NULL pointer, the object pointed to is freed.
   */
  void *krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  
  	if (unlikely(!new_size)) {
ef2ad80c7   Christoph Lameter   Slab allocators: ...
150
  		kfree(p);
93bc4e89c   Pekka Enberg   netfilter: fix do...
151
  		return ZERO_SIZE_PTR;
ef2ad80c7   Christoph Lameter   Slab allocators: ...
152
  	}
93bc4e89c   Pekka Enberg   netfilter: fix do...
153
154
155
156
  
  	ret = __krealloc(p, new_size, flags);
  	if (ret && p != ret)
  		kfree(p);
ef2ad80c7   Christoph Lameter   Slab allocators: ...
157
158
159
  	return ret;
  }
  EXPORT_SYMBOL(krealloc);
3ef0e5ba4   Johannes Weiner   slab: introduce k...
160
161
162
163
164
165
  /**
   * kzfree - like kfree but zero memory
   * @p: object to free memory of
   *
   * The memory of the object @p points to is zeroed before freed.
   * If @p is %NULL, kzfree() does nothing.
a234bdc9a   Pekka Enberg   slab: document kz...
166
167
168
169
   *
   * Note: this function zeroes the whole allocated buffer which can be a good
   * deal bigger than the requested buffer size passed to kmalloc(). So be
   * careful when using this function in performance sensitive code.
3ef0e5ba4   Johannes Weiner   slab: introduce k...
170
171
172
173
174
175
176
177
178
179
180
181
182
   */
  void kzfree(const void *p)
  {
  	size_t ks;
  	void *mem = (void *)p;
  
  	if (unlikely(ZERO_OR_NULL_PTR(mem)))
  		return;
  	ks = ksize(mem);
  	memset(mem, 0, ks);
  	kfree(mem);
  }
  EXPORT_SYMBOL(kzfree);
96840aa00   Davi Arnaut   [PATCH] strndup_u...
183
184
  /*
   * strndup_user - duplicate an existing string from user space
96840aa00   Davi Arnaut   [PATCH] strndup_u...
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
   * @s: The string to duplicate
   * @n: Maximum number of bytes to copy, including the trailing NUL.
   */
  char *strndup_user(const char __user *s, long n)
  {
  	char *p;
  	long length;
  
  	length = strnlen_user(s, n);
  
  	if (!length)
  		return ERR_PTR(-EFAULT);
  
  	if (length > n)
  		return ERR_PTR(-EINVAL);
  
  	p = kmalloc(length, GFP_KERNEL);
  
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, s, length)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	p[length - 1] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(strndup_user);
16d69265b   Andrew Morton   uninline arch_pic...
216
217
218
219
220
221
222
223
224
  
#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT
/*
 * Generic fallback for architectures that do not define their own mmap
 * layout: a bottom-up layout starting at TASK_UNMAPPED_BASE, using the
 * default get/unmap area helpers.
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif
912985dce   Rusty Russell   mm: Make generic ...
225

9de100d00   Andy Grover   mm: document get_...
226
227
228
229
230
231
232
233
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	/*
	 * Weak generic fallback: take mmap_sem for reading and go through
	 * the ordinary get_user_pages() path (force=0, no vma array).
	 * Architectures with a true fast path override this definition.
	 */
	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
ca2b84cb3   Eduard - Gabriel Munteanu   kmemtrace: use tr...
264
265
  
/*
 * Tracepoints definitions. The tracepoints themselves come from
 * <trace/events/kmem.h>, instantiated via CREATE_TRACE_POINTS at the top
 * of this file; export them so that modular code can use these events.
 */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);