mm/usercopy.c

  /*
   * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
   * which are designed to protect kernel memory from needless exposure
   * and overwrite under many unintended conditions. This code is based
   * on PAX_USERCOPY, which is:
   *
   * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
   * Security Inc.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   *
   */
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <asm/sections.h>
  
  enum {
  	BAD_STACK = -1,
  	NOT_STACK = 0,
  	GOOD_FRAME,
  	GOOD_STACK,
  };
  
  /*
   * Checks if a given pointer and length is contained by the current
   * stack frame (if possible).
   *
   * Returns:
   *	NOT_STACK: not at all on the stack
   *	GOOD_FRAME: fully within a valid stack frame
   *	GOOD_STACK: fully on the stack (when can't do frame-checking)
   *	BAD_STACK: error condition (invalid stack position or bad stack frame)
   */
  static noinline int check_stack_object(const void *obj, unsigned long len)
  {
  	const void * const stack = task_stack_page(current);
  	const void * const stackend = stack + THREAD_SIZE;
  	int ret;
  
  	/* Object is not on the stack at all. */
  	if (obj + len <= stack || stackend <= obj)
  		return NOT_STACK;
  
  	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
  	 * so if this check fails, the other end is outside the stack).
  	 */
  	if (obj < stack || stackend < obj + len)
  		return BAD_STACK;
  
  	/* Check if object is safely within a valid frame. */
  	ret = arch_within_stack_frames(stack, stackend, obj, len);
  	if (ret)
  		return ret;
  
  	return GOOD_STACK;
  }
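
/*
 * For illustration, assuming the current task's stack occupies
 * [stack, stack + THREAD_SIZE):
 *
 *	check_stack_object(stack + 0x100, 0x40)	-> GOOD_FRAME or GOOD_STACK
 *	check_stack_object(stack - 0x10, 0x40)	-> BAD_STACK (partial overlap)
 *	check_stack_object(some_heap_ptr, 0x40)	-> NOT_STACK
 */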
  
  static void report_usercopy(const void *ptr, unsigned long len,
  			    bool to_user, const char *type)
  {
	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
  		to_user ? "exposure" : "overwrite",
  		to_user ? "from" : "to", ptr, type ? : "unknown", len);
  	/*
  	 * For greater effect, it would be nice to do do_group_exit(),
  	 * but BUG() actually hooks all the lock-breaking and per-arch
  	 * Oops code, so that is used here instead.
  	 */
  	BUG();
  }
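
/*
 * A rejected copy produces output along these lines (illustrative) before
 * the BUG() oops:
 *
 *	usercopy: kernel memory exposure attempt detected from
 *	ffff88003703a640 (kmalloc-64) (128 bytes)
 */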
  
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
  static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
  		     unsigned long high)
  {
	unsigned long check_low = (unsigned long)ptr;
  	unsigned long check_high = check_low + n;
  
  	/* Does not overlap if entirely above or entirely below. */
  	if (check_low >= high || check_high <= low)
  		return false;
  
  	return true;
  }
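
/*
 * For example, with low = 0x1000 and high = 0x2000 (illustrative values):
 *
 *	overlaps((void *)0x0ff0, 0x20, 0x1000, 0x2000)	-> true
 *	overlaps((void *)0x2000, 0x10, 0x1000, 0x2000)	-> false
 *
 * The second case is false because [low,high) is half-open: an object
 * starting exactly at "high" sits entirely outside the range.
 */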
  
  /* Is this address range in the kernel text area? */
  static inline const char *check_kernel_text_object(const void *ptr,
  						   unsigned long n)
  {
  	unsigned long textlow = (unsigned long)_stext;
  	unsigned long texthigh = (unsigned long)_etext;
  	unsigned long textlow_linear, texthigh_linear;
  
  	if (overlaps(ptr, n, textlow, texthigh))
  		return "<kernel text>";
  
  	/*
  	 * Some architectures have virtual memory mappings with a secondary
  	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the inverse of __va(). This
	 * can be detected and checked:
  	 */
  	textlow_linear = (unsigned long)__va(__pa(textlow));
  	/* No different mapping: we're done. */
  	if (textlow_linear == textlow)
  		return NULL;
  
  	/* Check the secondary mapping... */
  	texthigh_linear = (unsigned long)__va(__pa(texthigh));
  	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
  		return "<linear kernel text>";
  
  	return NULL;
  }
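
/*
 * Illustrative case: on x86_64 the kernel text is reachable both through
 * the kernel image mapping and through the direct (linear) map of physical
 * memory, so __va(__pa(_stext)) yields a second, different virtual address
 * for the same text, and that alias must be rejected too.
 */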
  
  static inline const char *check_bogus_address(const void *ptr, unsigned long n)
  {
  	/* Reject if object wraps past end of memory. */
  	if ((unsigned long)ptr + n < (unsigned long)ptr)
  		return "<wrapped address>";
  
  	/* Reject if NULL or ZERO-allocation. */
  	if (ZERO_OR_NULL_PTR(ptr))
  		return "<null>";
  
  	return NULL;
  }
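
/*
 * For example (illustrative, 64-bit): check_bogus_address() rejects
 * ptr = (void *)0xfffffffffffffff0 with n = 0x20, since the unsigned sum
 * wraps around to 0x10, which is less than the starting address.
 */
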
  /* Checks for allocs that are marked in some way as spanning multiple pages. */
  static inline const char *check_page_span(const void *ptr, unsigned long n,
  					  struct page *page, bool to_user)
  {
  #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
  	const void *end = ptr + n - 1;
  	struct page *endpage;
  	bool is_reserved, is_cma;
  
  	/*
  	 * Sometimes the kernel data regions are not marked Reserved (see
  	 * check below). And sometimes [_sdata,_edata) does not cover
  	 * rodata and/or bss, so check each range explicitly.
  	 */
  
  	/* Allow reads of kernel rodata region (if not marked as Reserved). */
  	if (ptr >= (const void *)__start_rodata &&
  	    end <= (const void *)__end_rodata) {
  		if (!to_user)
  			return "<rodata>";
  		return NULL;
  	}
  
  	/* Allow kernel data region (if not marked as Reserved). */
  	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
  		return NULL;
  
  	/* Allow kernel bss region (if not marked as Reserved). */
  	if (ptr >= (const void *)__bss_start &&
  	    end <= (const void *)__bss_stop)
  		return NULL;
  
  	/* Is the object wholly within one base page? */
  	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
  		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
  		return NULL;
  	/* Allow if fully inside the same compound (__GFP_COMP) page. */
  	endpage = virt_to_head_page(end);
  	if (likely(endpage == page))
  		return NULL;
  
  	/*
  	 * Reject if range is entirely either Reserved (i.e. special or
  	 * device memory), or CMA. Otherwise, reject since the object spans
  	 * several independently allocated pages.
  	 */
  	is_reserved = PageReserved(page);
  	is_cma = is_migrate_cma_page(page);
  	if (!is_reserved && !is_cma)
  		return "<spans multiple pages>";
  
  	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
  		page = virt_to_head_page(ptr);
  		if (is_reserved && !PageReserved(page))
  			return "<spans Reserved and non-Reserved pages>";
  		if (is_cma && !is_migrate_cma_page(page))
  			return "<spans CMA and non-CMA pages>";
  	}
  #endif
  
  	return NULL;
  }
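
/*
 * Illustrative example of the compound-page rule above: a buffer from
 * alloc_pages(GFP_KERNEL | __GFP_COMP, 2) is one allocation, so a copy
 * crossing its internal page boundaries still maps to the same head page
 * and is allowed; a copy straying across two independently allocated
 * order-0 pages is rejected as "<spans multiple pages>".
 */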
  
  static inline const char *check_heap_object(const void *ptr, unsigned long n,
  					    bool to_user)
  {
  	struct page *page;
  
  	/*
  	 * Some architectures (arm64) return true for virt_addr_valid() on
  	 * vmalloced addresses. Work around this by checking for vmalloc
  	 * first.
  	 *
  	 * We also need to check for module addresses explicitly since we
	 * may copy static data from modules to userspace.
  	 */
  	if (is_vmalloc_or_module_addr(ptr))
  		return NULL;
  
  	if (!virt_addr_valid(ptr))
  		return NULL;
  
  	page = virt_to_head_page(ptr);
  
  	/* Check slab allocator for flags and size. */
  	if (PageSlab(page))
  		return __check_heap_object(ptr, n, page);

  	/* Verify object does not incorrectly span multiple pages. */
  	return check_page_span(ptr, n, page, to_user);
  }
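
/*
 * Example of the slab path above (illustrative): with
 * p = kmalloc(32, GFP_KERNEL), a request to copy 64 bytes from p reaches
 * __check_heap_object(), which knows the slab object's size and rejects
 * the 32-byte overread.
 */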
  
  /*
 * Validates that the given object is:
 * - not a bogus address
 * - a known-safe heap or stack object
 * - not in kernel text
   */
  void __check_object_size(const void *ptr, unsigned long n, bool to_user)
  {
  	const char *err;
  
  	/* Skip all tests if size is zero. */
  	if (!n)
  		return;
  
  	/* Check for invalid addresses. */
  	err = check_bogus_address(ptr, n);
  	if (err)
  		goto report;
  
  	/* Check for bad heap object. */
  	err = check_heap_object(ptr, n, to_user);
  	if (err)
  		goto report;
  
  	/* Check for bad stack object. */
  	switch (check_stack_object(ptr, n)) {
  	case NOT_STACK:
  		/* Object is not touching the current process stack. */
  		break;
  	case GOOD_FRAME:
  	case GOOD_STACK:
  		/*
  		 * Object is either in the correct frame (when it
  		 * is possible to check) or just generally on the
  		 * process stack (when frame checking not available).
  		 */
  		return;
  	default:
  		err = "<process stack>";
  		goto report;
  	}
  
  	/* Check for object in kernel to avoid text exposure. */
  	err = check_kernel_text_object(ptr, n);
  	if (!err)
  		return;
  
  report:
  	report_usercopy(ptr, n, to_user, err);
  }
  EXPORT_SYMBOL(__check_object_size);
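
/*
 * Callers normally reach this through the check_object_size() wrapper
 * (sketch of the helper added alongside this file in
 * include/linux/thread_info.h):
 *
 *	static __always_inline void check_object_size(const void *ptr,
 *						      unsigned long n,
 *						      bool to_user)
 *	{
 *		if (!__builtin_constant_p(n))
 *			__check_object_size(ptr, n, to_user);
 *	}
 *
 * so only copies whose size is not a compile-time constant pay for the
 * runtime checks.
 */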