mm/util.c

  // SPDX-License-Identifier: GPL-2.0-only
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/compiler.h>
  #include <linux/export.h>
  #include <linux/err.h>
  #include <linux/sched.h>
  #include <linux/sched/mm.h>
  #include <linux/sched/signal.h>
  #include <linux/sched/task_stack.h>
  #include <linux/security.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/mman.h>
  #include <linux/hugetlb.h>
  #include <linux/vmalloc.h>
  #include <linux/userfaultfd_k.h>
  #include <linux/elf.h>
  #include <linux/elf-randomize.h>
  #include <linux/personality.h>
  #include <linux/random.h>
  #include <linux/processor.h>
  #include <linux/sizes.h>
  #include <linux/compat.h>

  #include <linux/uaccess.h>

  #include "internal.h"
  /**
   * kfree_const - conditionally free memory
   * @x: pointer to the memory
   *
   * Function calls kfree() only if @x is not in the .rodata section.
   */
  void kfree_const(const void *x)
  {
  	if (!is_kernel_rodata((unsigned long)x))
  		kfree(x);
  }
  EXPORT_SYMBOL(kfree_const);
  /**
   * kstrdup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   *
   * Return: newly allocated copy of @s or %NULL in case of error
   */
  char *kstrdup(const char *s, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strlen(s) + 1;
  	buf = kmalloc_track_caller(len, gfp);
  	if (buf)
  		memcpy(buf, s, len);
  	return buf;
  }
  EXPORT_SYMBOL(kstrdup);
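
  /*
   * Illustrative usage sketch, not part of the original file: a caller
   * duplicates a string it does not own so it keeps a stable copy; the
   * copy is released with kfree(). The names below are hypothetical.
   *
   *	char *name = kstrdup(src, GFP_KERNEL);
   *
   *	if (!name)
   *		return -ENOMEM;
   *	...
   *	kfree(name);
   */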

  /**
   * kstrdup_const - conditionally duplicate an existing const string
   * @s: the string to duplicate
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   *
   * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
   * must not be passed to krealloc().
   *
   * Return: source string if it is in .rodata section otherwise
   * fallback to kstrdup.
   */
  const char *kstrdup_const(const char *s, gfp_t gfp)
  {
  	if (is_kernel_rodata((unsigned long)s))
  		return s;
  
  	return kstrdup(s, gfp);
  }
  EXPORT_SYMBOL(kstrdup_const);
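
  /*
   * Illustrative sketch, not part of the original file: kstrdup_const()
   * suits fields that are usually initialized from string literals, such
   * as object names. The result must be released with kfree_const(),
   * never plain kfree() or krealloc(). "obj" is hypothetical.
   *
   *	obj->name = kstrdup_const(name, GFP_KERNEL);
   *	if (!obj->name)
   *		return -ENOMEM;
   *	...
   *	kfree_const(obj->name);
   */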
  
  /**
   * kstrndup - allocate space for and copy an existing string
   * @s: the string to duplicate
   * @max: read at most @max chars from @s
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   *
   * Note: Use kmemdup_nul() instead if the size is known exactly.
   *
   * Return: newly allocated copy of @s or %NULL in case of error
   */
  char *kstrndup(const char *s, size_t max, gfp_t gfp)
  {
  	size_t len;
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	len = strnlen(s, max);
  	buf = kmalloc_track_caller(len+1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kstrndup);
  
  /**
   * kmemdup - duplicate region of memory
   *
   * @src: memory region to duplicate
   * @len: memory region length
   * @gfp: GFP mask to use
   *
   * Return: newly allocated copy of @src or %NULL in case of error
   */
  void *kmemdup(const void *src, size_t len, gfp_t gfp)
  {
  	void *p;
  	p = kmalloc_track_caller(len, gfp);
  	if (p)
  		memcpy(p, src, len);
  	return p;
  }
  EXPORT_SYMBOL(kmemdup);
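
  /*
   * Illustrative sketch, not part of the original file: duplicating a
   * fixed-size template structure; as with kstrdup(), failure is
   * reported as NULL and the copy is freed with kfree(). "tmpl" and
   * "cfg" are hypothetical.
   *
   *	cfg = kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
   *	if (!cfg)
   *		return -ENOMEM;
   */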
  /**
   * kmemdup_nul - Create a NUL-terminated string from unterminated data
   * @s: The data to stringify
   * @len: The size of the data
   * @gfp: the GFP mask used in the kmalloc() call when allocating memory
   *
   * Return: newly allocated copy of @s with NUL-termination or %NULL in
   * case of error
   */
  char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
  {
  	char *buf;
  
  	if (!s)
  		return NULL;
  
  	buf = kmalloc_track_caller(len + 1, gfp);
  	if (buf) {
  		memcpy(buf, s, len);
  		buf[len] = '\0';
  	}
  	return buf;
  }
  EXPORT_SYMBOL(kmemdup_nul);
  
  /**
   * memdup_user - duplicate memory region from user space
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Return: an ERR_PTR() on failure.  Result is physically
   * contiguous, to be freed by kfree().
   */
  void *memdup_user(const void __user *src, size_t len)
  {
  	void *p;
  	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user);
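
  /*
   * Illustrative sketch, not part of the original file: unlike the
   * k*dup helpers above, memdup_user() reports failure via ERR_PTR(),
   * so callers test with IS_ERR() rather than for NULL. "ubuf" and
   * "len" stand in for e.g. ioctl arguments.
   *
   *	buf = memdup_user(ubuf, len);
   *	if (IS_ERR(buf))
   *		return PTR_ERR(buf);
   *	...
   *	kfree(buf);
   */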
  /**
   * vmemdup_user - duplicate memory region from user space
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Return: an ERR_PTR() on failure.  Result may be not
   * physically contiguous.  Use kvfree() to free.
   */
  void *vmemdup_user(const void __user *src, size_t len)
  {
  	void *p;
  
  	p = kvmalloc(len, GFP_USER);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kvfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  
  	return p;
  }
  EXPORT_SYMBOL(vmemdup_user);
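
  /*
   * Illustrative sketch, not part of the original file: for large,
   * user-controlled sizes the kvmalloc-based copy avoids high-order
   * allocations; the result may come from either allocator, hence
   * kvfree(). "uptr" and "nentries" are hypothetical.
   *
   *	table = vmemdup_user(uptr, array_size(nentries, sizeof(*table)));
   *	if (IS_ERR(table))
   *		return PTR_ERR(table);
   *	...
   *	kvfree(table);
   */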
  /**
   * strndup_user - duplicate an existing string from user space
   * @s: The string to duplicate
   * @n: Maximum number of bytes to copy, including the trailing NUL.
   *
   * Return: newly allocated copy of @s or an ERR_PTR() in case of error
   */
  char *strndup_user(const char __user *s, long n)
  {
  	char *p;
  	long length;
  
  	length = strnlen_user(s, n);
  
  	if (!length)
  		return ERR_PTR(-EFAULT);
  
  	if (length > n)
  		return ERR_PTR(-EINVAL);
  	p = memdup_user(s, length);

  	if (IS_ERR(p))
  		return p;
  
  	p[length - 1] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(strndup_user);
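
  /*
   * Illustrative sketch, not part of the original file: copying a
   * NUL-terminated string argument from userspace with an upper bound,
   * so a hostile caller cannot force an unbounded allocation.
   *
   *	name = strndup_user(uname, PATH_MAX);
   *	if (IS_ERR(name))
   *		return PTR_ERR(name);
   *	...
   *	kfree(name);
   */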

  /**
   * memdup_user_nul - duplicate memory region from user space and NUL-terminate
   *
   * @src: source address in user space
   * @len: number of bytes to copy
   *
   * Return: an ERR_PTR() on failure.
   */
  void *memdup_user_nul(const void __user *src, size_t len)
  {
  	char *p;
  
  	/*
  	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  	 * cause a page fault, which makes it pointless to use GFP_NOFS
  	 * or GFP_ATOMIC.
  	 */
  	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
  	if (!p)
  		return ERR_PTR(-ENOMEM);
  
  	if (copy_from_user(p, src, len)) {
  		kfree(p);
  		return ERR_PTR(-EFAULT);
  	}
  	p[len] = '\0';
  
  	return p;
  }
  EXPORT_SYMBOL(memdup_user_nul);
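
  /*
   * Illustrative sketch, not part of the original file: the typical
   * caller is a procfs-style write handler that wants to parse the user
   * buffer as a C string. The names here are hypothetical.
   *
   *	static ssize_t example_write(struct file *file,
   *			const char __user *ubuf, size_t count, loff_t *ppos)
   *	{
   *		char *kbuf = memdup_user_nul(ubuf, count);
   *
   *		if (IS_ERR(kbuf))
   *			return PTR_ERR(kbuf);
   *		... parse kbuf ...
   *		kfree(kbuf);
   *		return count;
   *	}
   */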
  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
  		struct vm_area_struct *prev)
  {
  	struct vm_area_struct *next;
  
  	vma->vm_prev = prev;
  	if (prev) {
  		next = prev->vm_next;
  		prev->vm_next = vma;
  	} else {
  		next = mm->mmap;
  		mm->mmap = vma;
  	}
  	vma->vm_next = next;
  	if (next)
  		next->vm_prev = vma;
  }
  void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
  {
  	struct vm_area_struct *prev, *next;
  
  	next = vma->vm_next;
  	prev = vma->vm_prev;
  	if (prev)
  		prev->vm_next = next;
  	else
  		mm->mmap = next;
  	if (next)
  		next->vm_prev = prev;
  }
  /* Check if the vma is being used as a stack by this task */
  int vma_is_stack_for_current(struct vm_area_struct *vma)
  {
  	struct task_struct * __maybe_unused t = current;
  	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
  }
  #ifndef STACK_RND_MASK
  #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
  #endif
  
  unsigned long randomize_stack_top(unsigned long stack_top)
  {
  	unsigned long random_variable = 0;
  
  	if (current->flags & PF_RANDOMIZE) {
  		random_variable = get_random_long();
  		random_variable &= STACK_RND_MASK;
  		random_variable <<= PAGE_SHIFT;
  	}
  #ifdef CONFIG_STACK_GROWSUP
  	return PAGE_ALIGN(stack_top) + random_variable;
  #else
  	return PAGE_ALIGN(stack_top) - random_variable;
  #endif
  }
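
  /*
   * Worked example (illustrative): with 4K pages PAGE_SHIFT is 12, so
   * STACK_RND_MASK defaults to 0x7ff and the offset becomes
   * (get_random_long() & 0x7ff) << 12, i.e. at most 0x7ff000 bytes -
   * just under the 8MB of VA noted above. A larger PAGE_SHIFT shrinks
   * the mask by the same factor, keeping the randomized span at ~8MB.
   */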
  #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
  unsigned long arch_randomize_brk(struct mm_struct *mm)
  {
  	/* Is the current task 32-bit? */
  	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
  		return randomize_page(mm->brk, SZ_32M);
  
  	return randomize_page(mm->brk, SZ_1G);
  }
  unsigned long arch_mmap_rnd(void)
  {
  	unsigned long rnd;
  
  #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
  	if (is_compat_task())
  		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
  	else
  #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
  		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
  
  	return rnd << PAGE_SHIFT;
  }
  
  static int mmap_is_legacy(struct rlimit *rlim_stack)
  {
  	if (current->personality & ADDR_COMPAT_LAYOUT)
  		return 1;
  
  	if (rlim_stack->rlim_cur == RLIM_INFINITY)
  		return 1;
  
  	return sysctl_legacy_va_layout;
  }
  
  /*
   * Leave enough space between the mmap area and the stack to honour ulimit in
   * the face of randomisation.
   */
  #define MIN_GAP		(SZ_128M)
  #define MAX_GAP		(STACK_TOP / 6 * 5)
  
  static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
  {
  	unsigned long gap = rlim_stack->rlim_cur;
  	unsigned long pad = stack_guard_gap;
  
  	/* Account for stack randomization if necessary */
  	if (current->flags & PF_RANDOMIZE)
  		pad += (STACK_RND_MASK << PAGE_SHIFT);
  
  	/* Values close to RLIM_INFINITY can overflow. */
  	if (gap + pad > gap)
  		gap += pad;
  
  	if (gap < MIN_GAP)
  		gap = MIN_GAP;
  	else if (gap > MAX_GAP)
  		gap = MAX_GAP;
  
  	return PAGE_ALIGN(STACK_TOP - gap - rnd);
  }
  
  void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
  {
  	unsigned long random_factor = 0UL;
  
  	if (current->flags & PF_RANDOMIZE)
  		random_factor = arch_mmap_rnd();
  
  	if (mmap_is_legacy(rlim_stack)) {
  		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
  		mm->get_unmapped_area = arch_get_unmapped_area;
  	} else {
  		mm->mmap_base = mmap_base(random_factor, rlim_stack);
  		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
  	}
  }
  #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
  void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
  {
  	mm->mmap_base = TASK_UNMAPPED_BASE;
  	mm->get_unmapped_area = arch_get_unmapped_area;
  }
  #endif

  /**
   * __account_locked_vm - account locked pages to an mm's locked_vm
   * @mm:          mm to account against
   * @pages:       number of pages to account
   * @inc:         %true if @pages should be considered positive, %false if not
   * @task:        task used to check RLIMIT_MEMLOCK
   * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
   *
   * Assumes @task and @mm are valid (i.e. at least one reference on each), and
   * that mmap_lock is held as writer.
   *
   * Return:
   * * 0       on success
   * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
   */
  int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
  			struct task_struct *task, bool bypass_rlim)
  {
  	unsigned long locked_vm, limit;
  	int ret = 0;
  	mmap_assert_write_locked(mm);
  
  	locked_vm = mm->locked_vm;
  	if (inc) {
  		if (!bypass_rlim) {
  			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  			if (locked_vm + pages > limit)
  				ret = -ENOMEM;
  		}
  		if (!ret)
  			mm->locked_vm = locked_vm + pages;
  	} else {
  		WARN_ON_ONCE(pages > locked_vm);
  		mm->locked_vm = locked_vm - pages;
  	}
  
  	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s
  ", __func__, task->pid,
  		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
  		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
  		 ret ? " - exceeded" : "");
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__account_locked_vm);
  
  /**
   * account_locked_vm - account locked pages to an mm's locked_vm
   * @mm:          mm to account against, may be NULL
   * @pages:       number of pages to account
   * @inc:         %true if @pages should be considered positive, %false if not
   *
   * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
   *
   * Return:
   * * 0       on success, or if mm is NULL
   * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
   */
  int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
  {
  	int ret;
  
  	if (pages == 0 || !mm)
  		return 0;
  	mmap_write_lock(mm);
  	ret = __account_locked_vm(mm, pages, inc, current,
  				  capable(CAP_IPC_LOCK));
  	mmap_write_unlock(mm);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(account_locked_vm);
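
  /*
   * Illustrative sketch, not part of the original file: a driver that
   * pins user pages accounts them up front and unaccounts them on
   * teardown; "npages" is hypothetical.
   *
   *	ret = account_locked_vm(current->mm, npages, true);
   *	if (ret)
   *		return ret;
   *	... pin the pages ...
   *
   * and later, on release:
   *
   *	account_locked_vm(current->mm, npages, false);
   */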
  unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long pgoff)
  {
  	unsigned long ret;
  	struct mm_struct *mm = current->mm;
  	unsigned long populate;
  	LIST_HEAD(uf);
  
  	ret = security_mmap_file(file, prot, flag);
  	if (!ret) {
  		if (mmap_write_lock_killable(mm))
  			return -EINTR;
  		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
  			      &uf);
  		mmap_write_unlock(mm);
  		userfaultfd_unmap_complete(mm, &uf);
  		if (populate)
  			mm_populate(ret, populate);
  	}
  	return ret;
  }
  
  unsigned long vm_mmap(struct file *file, unsigned long addr,
  	unsigned long len, unsigned long prot,
  	unsigned long flag, unsigned long offset)
  {
  	if (unlikely(offset + PAGE_ALIGN(len) < offset))
  		return -EINVAL;
  	if (unlikely(offset_in_page(offset)))
  		return -EINVAL;
  	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
  }
  EXPORT_SYMBOL(vm_mmap);
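
  /*
   * Illustrative sketch, not part of the original file: vm_mmap() is the
   * in-kernel counterpart of mmap(2), used e.g. by binfmt loaders to map
   * an executable's segments. Errors are encoded in the unsigned long
   * return value, so it is tested with IS_ERR_VALUE().
   *
   *	addr = vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, 0);
   *	if (IS_ERR_VALUE(addr))
   *		return (long)addr;
   */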
  /**
   * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
   * failure, fall back to non-contiguous (vmalloc) allocation.
   * @size: size of the request.
   * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
   * @node: numa node to allocate from
   *
   * Uses kmalloc to get the memory but if the allocation fails then falls back
   * to the vmalloc allocator. Use kvfree for freeing the memory.
   *
   * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
   * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
   * preferable to the vmalloc fallback, due to visible performance drawbacks.
   *
   * Please note that any use of gfp flags outside of GFP_KERNEL will not
   * fall back to vmalloc.
   *
   * Return: pointer to the allocated memory or %NULL in case of failure
   */
  void *kvmalloc_node(size_t size, gfp_t flags, int node)
  {
  	gfp_t kmalloc_flags = flags;
  	void *ret;
  
  	/*
  	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
  	 * so the given set of flags has to be compatible.
  	 */
  	if ((flags & GFP_KERNEL) != GFP_KERNEL)
  		return kmalloc_node(size, flags, node);
  
  	/*
  	 * We want to attempt a large physically contiguous block first because
  	 * it is less likely to fragment multiple larger blocks and therefore
  	 * contribute to a long term fragmentation less than vmalloc fallback.
  	 * However make sure that larger requests are not too disruptive - no
  	 * OOM killer and no allocation failure warnings as we have a fallback.
  	 */
  	if (size > PAGE_SIZE) {
  		kmalloc_flags |= __GFP_NOWARN;
  		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
  			kmalloc_flags |= __GFP_NORETRY;
  	}
  
  	ret = kmalloc_node(size, kmalloc_flags, node);
  
  	/*
  	 * It doesn't really make sense to fall back to vmalloc for sub-page
  	 * requests
  	 */
  	if (ret || size <= PAGE_SIZE)
  		return ret;
  	return __vmalloc_node(size, 1, flags, node,
  			__builtin_return_address(0));
  }
  EXPORT_SYMBOL(kvmalloc_node);
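
  /*
   * Illustrative sketch, not part of the original file: most callers use
   * the kvmalloc()/kvmalloc_array() wrappers from <linux/mm.h>, which
   * funnel into kvmalloc_node(); whichever allocator satisfied the
   * request, the buffer is released with kvfree().
   *
   *	entries = kvmalloc_array(nr, sizeof(*entries), GFP_KERNEL);
   *	if (!entries)
   *		return -ENOMEM;
   *	...
   *	kvfree(entries);
   */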
  /**
   * kvfree() - Free memory.
   * @addr: Pointer to allocated memory.
   *
   * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
   * It is slightly more efficient to use kfree() or vfree() if you are certain
   * that you know which one to use.
   *
   * Context: Either preemptible task context or not-NMI interrupt.
   */
  void kvfree(const void *addr)
  {
  	if (is_vmalloc_addr(addr))
  		vfree(addr);
  	else
  		kfree(addr);
  }
  EXPORT_SYMBOL(kvfree);
  /**
   * kvfree_sensitive - Free a data object containing sensitive information.
   * @addr: address of the data object to be freed.
   * @len: length of the data object.
   *
   * Use the special memzero_explicit() function to clear the content of a
   * kvmalloc'ed object containing sensitive data to make sure that the
   * compiler won't optimize out the data clearing.
   */
  void kvfree_sensitive(const void *addr, size_t len)
  {
  	if (likely(!ZERO_OR_NULL_PTR(addr))) {
  		memzero_explicit((void *)addr, len);
  		kvfree(addr);
  	}
  }
  EXPORT_SYMBOL(kvfree_sensitive);
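
  /*
   * Illustrative sketch, not part of the original file: typical for
   * buffers holding key material, which must be wiped before the memory
   * is reused; "key" and "keylen" are hypothetical.
   *
   *	key = kvmalloc(keylen, GFP_KERNEL);
   *	... use the key ...
   *	kvfree_sensitive(key, keylen);
   */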
  static inline void *__page_rmapping(struct page *page)
  {
  	unsigned long mapping;
  
  	mapping = (unsigned long)page->mapping;
  	mapping &= ~PAGE_MAPPING_FLAGS;
  
  	return (void *)mapping;
  }
  
  /* Neutral page->mapping pointer to address_space or anon_vma or other */
  void *page_rmapping(struct page *page)
  {
  	page = compound_head(page);
  	return __page_rmapping(page);
  }
  /*
   * Return true if this page is mapped into pagetables.
   * For a compound page it returns true if any subpage is mapped.
   */
  bool page_mapped(struct page *page)
  {
  	int i;
  
  	if (likely(!PageCompound(page)))
  		return atomic_read(&page->_mapcount) >= 0;
  	page = compound_head(page);
  	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
  		return true;
  	if (PageHuge(page))
  		return false;
  	for (i = 0; i < compound_nr(page); i++) {
  		if (atomic_read(&page[i]._mapcount) >= 0)
  			return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL(page_mapped);
  struct anon_vma *page_anon_vma(struct page *page)
  {
  	unsigned long mapping;
  
  	page = compound_head(page);
  	mapping = (unsigned long)page->mapping;
  	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
  		return NULL;
  	return __page_rmapping(page);
  }
  struct address_space *page_mapping(struct page *page)
  {
  	struct address_space *mapping;
  
  	page = compound_head(page);

  	/* This happens if someone calls flush_dcache_page on slab page */
  	if (unlikely(PageSlab(page)))
  		return NULL;
  	if (unlikely(PageSwapCache(page))) {
  		swp_entry_t entry;
  
  		entry.val = page_private(page);
  		return swap_address_space(entry);
  	}
  	mapping = page->mapping;
  	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
  		return NULL;
  
  	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
  }
  EXPORT_SYMBOL(page_mapping);

  /*
   * For file cache pages, return the address_space, otherwise return NULL
   */
  struct address_space *page_mapping_file(struct page *page)
  {
  	if (unlikely(PageSwapCache(page)))
  		return NULL;
  	return page_mapping(page);
  }
  /* Slow path of page_mapcount() for compound pages */
  int __page_mapcount(struct page *page)
  {
  	int ret;
  
  	ret = atomic_read(&page->_mapcount) + 1;
  	/*
  	 * For file THP page->_mapcount contains the total number of mappings
  	 * of the page: no need to look into compound_mapcount.
  	 */
  	if (!PageAnon(page) && !PageHuge(page))
  		return ret;
  	page = compound_head(page);
  	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
  	if (PageDoubleMap(page))
  		ret--;
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__page_mapcount);
  int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
  int sysctl_overcommit_ratio __read_mostly = 50;
  unsigned long sysctl_overcommit_kbytes __read_mostly;
  int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
  unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
  unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
  int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
  		size_t *lenp, loff_t *ppos)
  {
  	int ret;
  
  	ret = proc_dointvec(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		sysctl_overcommit_kbytes = 0;
  	return ret;
  }
  static void sync_overcommit_as(struct work_struct *dummy)
  {
  	percpu_counter_sync(&vm_committed_as);
  }
  
  int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
  		size_t *lenp, loff_t *ppos)
  {
  	struct ctl_table t;
  	int new_policy;
  	int ret;
  
  	/*
  	 * The deviation of sync_overcommit_as could be big with loose policies
  	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to the
  	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
  	 * with the strict "NEVER", and to avoid a possible race condition
  	 * (even though users usually won't switch to OVERCOMMIT_NEVER very
  	 * often), the switch is done in the following order:
  	 *	1. changing the batch
  	 *	2. sync percpu count on each CPU
  	 *	3. switch the policy
  	 */
  	if (write) {
  		t = *table;
  		t.data = &new_policy;
  		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
  		if (ret)
  			return ret;
  
  		mm_compute_batch(new_policy);
  		if (new_policy == OVERCOMMIT_NEVER)
  			schedule_on_each_cpu(sync_overcommit_as);
  		sysctl_overcommit_memory = new_policy;
  	} else {
  		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	}
  
  	return ret;
  }
  int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
  		size_t *lenp, loff_t *ppos)
  {
  	int ret;
  
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		sysctl_overcommit_ratio = 0;
  	return ret;
  }
  /*
   * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
   */
  unsigned long vm_commit_limit(void)
  {
  	unsigned long allowed;
  
  	if (sysctl_overcommit_kbytes)
  		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
  	else
  		allowed = ((totalram_pages() - hugetlb_total_pages())
  			   * sysctl_overcommit_ratio / 100);
  	allowed += total_swap_pages;
  
  	return allowed;
  }
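
  /*
   * Worked example (illustrative): with vm.overcommit_kbytes unset, 8GB
   * of RAM, no huge pages, 2GB of swap and the default
   * vm.overcommit_ratio of 50, the limit computed above (in pages) is
   * 8GB * 50/100 + 2GB = 6GB.
   */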
  /*
   * Make sure vm_committed_as is in its own cacheline and not shared with
   * other variables, as it can be updated by several CPUs frequently.
   */
  struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
  
  /*
   * The global memory commitment made in the system can be a metric
   * that can be used to drive ballooning decisions when Linux is hosted
   * as a guest. On Hyper-V, the host implements a policy engine for dynamically
   * balancing memory across competing virtual machines that are hosted.
   * Several metrics drive this policy engine including the guest reported
   * memory commitment.
   *
   * The time cost of this is very low for small platforms; for a big
   * platform like a 2S/36C/72T Skylake server, in the worst case, where
   * vm_committed_as's spinlock is under severe contention, the time cost
   * could be about 30~40 microseconds.
   */
  unsigned long vm_memory_committed(void)
  {
  	return percpu_counter_sum_positive(&vm_committed_as);
  }
  EXPORT_SYMBOL_GPL(vm_memory_committed);
  
  /*
   * Check that a process has enough memory to allocate a new virtual
   * mapping. 0 means there is enough memory for the allocation to
   * succeed and -ENOMEM implies there is not.
   *
   * We currently support three overcommit policies, which are set via the
   * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
   *
   * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
   * Additional code 2002 Jul 20 by Robert Love.
   *
   * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
   *
   * Note this is a helper function intended to be used by LSMs which
   * wish to use this logic.
   */
  int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
  {
  	long allowed;

  	vm_acct_memory(pages);
  
  	/*
  	 * Sometimes we want to use more memory than we have
  	 */
  	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
  		return 0;
  
  	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
  		if (pages > totalram_pages() + total_swap_pages)
  			goto error;
  		return 0;
  	}
  
  	allowed = vm_commit_limit();
  	/*
  	 * Reserve some for root
  	 */
  	if (!cap_sys_admin)
  		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
  
  	/*
  	 * Don't let a single process grow so big a user can't recover
  	 */
  	if (mm) {
  		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
  		allowed -= min_t(long, mm->total_vm / 32, reserve);
  	}
  
  	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
  		return 0;
  error:
  	vm_unacct_memory(pages);
  
  	return -ENOMEM;
  }
  /**
   * get_cmdline() - copy the cmdline value to a buffer.
   * @task:     the task whose cmdline value to copy.
   * @buffer:   the buffer to copy to.
   * @buflen:   the length of the buffer. Larger cmdline values are truncated
   *            to this length.
   *
   * Return: the size of the cmdline field copied. Note that the copy does
   * not guarantee an ending NUL byte.
   */
  int get_cmdline(struct task_struct *task, char *buffer, int buflen)
  {
  	int res = 0;
  	unsigned int len;
  	struct mm_struct *mm = get_task_mm(task);
  	unsigned long arg_start, arg_end, env_start, env_end;
  	if (!mm)
  		goto out;
  	if (!mm->arg_end)
  		goto out_mm;	/* Shh! No looking before we're done */
  	spin_lock(&mm->arg_lock);
  	arg_start = mm->arg_start;
  	arg_end = mm->arg_end;
  	env_start = mm->env_start;
  	env_end = mm->env_end;
  	spin_unlock(&mm->arg_lock);
  
  	len = arg_end - arg_start;
  
  	if (len > buflen)
  		len = buflen;
  	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
  
  	/*
  	 * If the nul at the end of args has been overwritten, then
  	 * assume the application is using setproctitle(3).
  	 */
  	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
  		len = strnlen(buffer, res);
  		if (len < res) {
  			res = len;
  		} else {
  			len = env_end - env_start;
  			if (len > buflen - res)
  				len = buflen - res;
  			res += access_process_vm(task, env_start,
  						 buffer+res, len,
  						 FOLL_FORCE);
  			res = strnlen(buffer, res);
  		}
  	}
  out_mm:
  	mmput(mm);
  out:
  	return res;
  }
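
  /*
   * Illustrative sketch, not part of the original file: a caller (e.g.
   * in auditing code) snapshots another task's command line and, per the
   * note above, terminates the buffer itself.
   *
   *	char buf[128];
   *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
   *
   *	buf[n] = '\0';
   */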

  int __weak memcmp_pages(struct page *page1, struct page *page2)
  {
  	char *addr1, *addr2;
  	int ret;
  
  	addr1 = kmap_atomic(page1);
  	addr2 = kmap_atomic(page2);
  	ret = memcmp(addr1, addr2, PAGE_SIZE);
  	kunmap_atomic(addr2);
  	kunmap_atomic(addr1);
  	return ret;
  }