mm/usercopy.c

// SPDX-License-Identifier: GPL-2.0-only
  /*
   * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
   * which are designed to protect kernel memory from needless exposure
   * and overwrite under many unintended conditions. This code is based
   * on PAX_USERCOPY, which is:
   *
   * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
   * Security Inc.
   */
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
  #include <linux/mm.h>
  #include <linux/highmem.h>
  #include <linux/slab.h>
  #include <linux/sched.h>
  #include <linux/sched/task.h>
  #include <linux/sched/task_stack.h>
  #include <linux/thread_info.h>
  #include <linux/atomic.h>
  #include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length are contained by the current
   * stack frame (if possible).
   *
   * Returns:
   *	NOT_STACK: not at all on the stack
   *	GOOD_FRAME: fully within a valid stack frame
   *	GOOD_STACK: fully on the stack (when can't do frame-checking)
   *	BAD_STACK: error condition (invalid stack position or bad stack frame)
   */
  static noinline int check_stack_object(const void *obj, unsigned long len)
  {
  	const void * const stack = task_stack_page(current);
  	const void * const stackend = stack + THREAD_SIZE;
  	int ret;
  
  	/* Object is not on the stack at all. */
  	if (obj + len <= stack || stackend <= obj)
  		return NOT_STACK;
  
  	/*
  	 * Reject: object partially overlaps the stack (passing the
  	 * check above means at least one end is within the stack,
  	 * so if this check fails, the other end is outside the stack).
  	 */
  	if (obj < stack || stackend < obj + len)
  		return BAD_STACK;
  
  	/* Check if object is safely within a valid frame. */
  	ret = arch_within_stack_frames(stack, stackend, obj, len);
  	if (ret)
  		return ret;
  
  	return GOOD_STACK;
  }
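
/*
 * Illustrative sketch (hypothetical caller, not part of this file): given a
 * local "char buf[16];", a call like copy_to_user(user_ptr, buf, 64) names
 * an object that extends past its variable. On architectures that implement
 * arch_within_stack_frames(), the span crosses a stack frame boundary and
 * BAD_STACK is returned; without frame checking, the copy is rejected only
 * if it extends beyond the stack region itself.
 */
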
  /*
   * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
   * an unexpected state during a copy_from_user() or copy_to_user() call.
   * There are several checks being performed on the buffer by the
   * __check_object_size() function. Normal stack buffer usage should never
   * trip the checks, and kernel text addressing will always trip the check.
   * For cache objects, it is checking that only the whitelisted range of
   * bytes for a given cache is being accessed (via the cache's usersize and
   * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
   * kmem_cache_create_usercopy() function to create the cache (and
   * carefully audit the whitelist range).
   */
  void usercopy_warn(const char *name, const char *detail, bool to_user,
  		   unsigned long offset, unsigned long len)
  {
  	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!
  ",
  		 to_user ? "exposure" : "overwrite",
  		 to_user ? "from" : "to",
  		 name ? : "unknown?!",
  		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
  		 offset, len);
  }
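
/*
 * Illustrative sketch of the whitelisting described above (struct, field,
 * and cache names are hypothetical): a cache whose objects expose a single
 * field to user space could be created with:
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo",
 *				sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, user_data),
 *				sizeof_field(struct foo, user_data), NULL);
 *
 * Copies into or out of such objects are then only permitted within the
 * [useroffset, useroffset + usersize) window enforced by
 * __check_heap_object().
 */
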
  void __noreturn usercopy_abort(const char *name, const char *detail,
  			       bool to_user, unsigned long offset,
  			       unsigned long len)
  {
  	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!
  ",
  		 to_user ? "exposure" : "overwrite",
  		 to_user ? "from" : "to",
  		 name ? : "unknown?!",
  		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
  	 * For greater effect, it would be nice to do do_group_exit(),
  	 * but BUG() actually hooks all the lock-breaking and per-arch
  	 * Oops code, so that is used here instead.
  	 */
  	BUG();
  }
  
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
  static bool overlaps(const unsigned long ptr, unsigned long n,
  		     unsigned long low, unsigned long high)
  {
  	const unsigned long check_low = ptr;
  	unsigned long check_high = check_low + n;
  
  	/* Does not overlap if entirely above or entirely below. */
  	if (check_low >= high || check_high <= low)
  		return false;
  
  	return true;
  }
  
  /* Is this address range in the kernel text area? */
  static inline void check_kernel_text_object(const unsigned long ptr,
  					    unsigned long n, bool to_user)
  {
  	unsigned long textlow = (unsigned long)_stext;
  	unsigned long texthigh = (unsigned long)_etext;
  	unsigned long textlow_linear, texthigh_linear;
  
  	if (overlaps(ptr, n, textlow, texthigh))
  		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);
  
  	/*
  	 * Some architectures have virtual memory mappings with a secondary
  	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the reverse of __va(). This can
	 * be detected and checked:
  	 */
  	textlow_linear = (unsigned long)lm_alias(textlow);
  	/* No different mapping: we're done. */
  	if (textlow_linear == textlow)
  		return;
  
  	/* Check the secondary mapping... */
  	texthigh_linear = (unsigned long)lm_alias(texthigh);
  	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
  		usercopy_abort("linear kernel text", NULL, to_user,
  			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
  				       bool to_user)
  {
  	/* Reject if object wraps past end of memory. */
  	if (ptr + (n - 1) < ptr)
  		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
  
  	/* Reject if NULL or ZERO-allocation. */
  	if (ZERO_OR_NULL_PTR(ptr))
  		usercopy_abort("null address", NULL, to_user, ptr, n);
  }
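
/*
 * For example (hypothetical values): with ptr == ULONG_MAX - 3 and n == 8,
 * ptr + (n - 1) wraps past the top of the address space and compares less
 * than ptr, so the copy is aborted before anything is dereferenced.
 */
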
  /* Checks for allocs that are marked in some way as spanning multiple pages. */
  static inline void check_page_span(const void *ptr, unsigned long n,
  				   struct page *page, bool to_user)
  {
  #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
  	const void *end = ptr + n - 1;
  	struct page *endpage;
  	bool is_reserved, is_cma;
  
  	/*
  	 * Sometimes the kernel data regions are not marked Reserved (see
  	 * check below). And sometimes [_sdata,_edata) does not cover
  	 * rodata and/or bss, so check each range explicitly.
  	 */
  
  	/* Allow reads of kernel rodata region (if not marked as Reserved). */
  	if (ptr >= (const void *)__start_rodata &&
  	    end <= (const void *)__end_rodata) {
  		if (!to_user)
  			usercopy_abort("rodata", NULL, to_user, 0, n);
  		return;
  	}
  
  	/* Allow kernel data region (if not marked as Reserved). */
  	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
  		return;
  
  	/* Allow kernel bss region (if not marked as Reserved). */
  	if (ptr >= (const void *)__bss_start &&
  	    end <= (const void *)__bss_stop)
  		return;
  
  	/* Is the object wholly within one base page? */
  	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
  		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
  		return;

  	/* Allow if fully inside the same compound (__GFP_COMP) page. */
  	endpage = virt_to_head_page(end);
  	if (likely(endpage == page))
  		return;
  
  	/*
  	 * Reject if range is entirely either Reserved (i.e. special or
  	 * device memory), or CMA. Otherwise, reject since the object spans
  	 * several independently allocated pages.
  	 */
  	is_reserved = PageReserved(page);
  	is_cma = is_migrate_cma_page(page);
  	if (!is_reserved && !is_cma)
  		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
  
  	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
  		page = virt_to_head_page(ptr);
  		if (is_reserved && !PageReserved(page))
  			usercopy_abort("spans Reserved and non-Reserved pages",
  				       NULL, to_user, 0, n);
  		if (is_cma && !is_migrate_cma_page(page))
  			usercopy_abort("spans CMA and non-CMA pages", NULL,
  				       to_user, 0, n);
  	}
  #endif
  }
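
/*
 * For example (hypothetical allocations): a two-page buffer backed by
 * alloc_pages(GFP_KERNEL | __GFP_COMP, 1) passes the endpage test above,
 * since both base pages share one compound head. A copy that merely runs
 * from one independently allocated order-0 page into the next is rejected
 * as spanning multiple pages.
 */
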
  static inline void check_heap_object(const void *ptr, unsigned long n,
  				     bool to_user)
  {
	struct page *page;

	if (!virt_addr_valid(ptr))
  		return;

  	/*
  	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
	 * highmem page or fall back to virt_to_page(). The following
  	 * is effectively a highmem-aware virt_to_head_page().
  	 */
  	page = compound_head(kmap_to_page((void *)ptr));

  	if (PageSlab(page)) {
  		/* Check slab allocator for flags and size. */
  		__check_heap_object(ptr, n, page, to_user);
  	} else {
  		/* Verify object does not incorrectly span multiple pages. */
  		check_page_span(ptr, n, page, to_user);
  	}
}

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
   * Validates that the given object is:
 * - not a bogus address
   * - fully contained by stack (or stack frame, when available)
   * - fully within SLAB object (or object whitelist area, when available)
   * - not in kernel text
   */
  void __check_object_size(const void *ptr, unsigned long n, bool to_user)
  {
  	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
  	if (!n)
  		return;
  
  	/* Check for invalid addresses. */
  	check_bogus_address((const unsigned long)ptr, n, to_user);

  	/* Check for bad stack object. */
  	switch (check_stack_object(ptr, n)) {
  	case NOT_STACK:
  		/* Object is not touching the current process stack. */
  		break;
  	case GOOD_FRAME:
  	case GOOD_STACK:
  		/*
  		 * Object is either in the correct frame (when it
  		 * is possible to check) or just generally on the
  		 * process stack (when frame checking not available).
  		 */
  		return;
  	default:
  		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
  	check_kernel_text_object((const unsigned long)ptr, n, to_user);
  }
  EXPORT_SYMBOL(__check_object_size);
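
/*
 * A rough sketch of how this entry point is reached (simplified; the real
 * wrappers live in the uaccess headers): check_object_size() in
 * <linux/thread_info.h> calls __check_object_size() when the copy size is
 * not a compile-time constant, so a driver doing
 *
 *	copy_to_user(ubuf, kbuf, len)
 *
 * with a runtime-computed len has kbuf validated by the stack, heap, and
 * kernel-text checks above before any bytes move.
 */
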
  
  static bool enable_checks __initdata = true;
  
  static int __init parse_hardened_usercopy(char *str)
  {
  	return strtobool(str, &enable_checks);
  }
  
  __setup("hardened_usercopy=", parse_hardened_usercopy);
  
  static int __init set_hardened_usercopy(void)
  {
  	if (enable_checks == false)
  		static_branch_enable(&bypass_usercopy_checks);
  	return 1;
  }
  
  late_initcall(set_hardened_usercopy);
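
/*
 * Usage note: booting with "hardened_usercopy=off" on the kernel command
 * line makes set_hardened_usercopy() enable the bypass_usercopy_checks
 * static branch at late_initcall time, turning every __check_object_size()
 * call into a near-free jump.
 */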