Commit 80a77045daacc660659093b312ca0708b53ed558

Authored by Linus Torvalds

Merge tag 'usercopy-v4.8-rc6-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull more hardened usercopy fixes from Kees Cook:

 - force check_object_size() to be inline too

 - move page-spanning check behind a CONFIG since it's triggering false
   positives

[ Changed the page-spanning config option to depend on EXPERT in the
  merge.  That way it still gets build testing, and you can enable it if
  you want to, but it is never enabled for "normal" configurations ]

* tag 'usercopy-v4.8-rc6-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  usercopy: remove page-spanning test for now
  usercopy: force check_object_size() inline

Showing 3 changed files:

include/linux/thread_info.h

@@ -118,8 +118,8 @@
 extern void __check_object_size(const void *ptr, unsigned long n,
                                 bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
-                                     bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+                                              bool to_user)
 {
         if (!__builtin_constant_p(n))
                 __check_object_size(ptr, n, to_user);
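
Why the __always_inline matters: __builtin_constant_p(n) can only fold to
true after the wrapper has been inlined into a call site that passes a
constant size. If the compiler emits check_object_size() as an out-of-line
function, n is just a parameter, the test is always false, and even
fixed-size copies take the slow __check_object_size() path. A minimal
userspace sketch of the effect (hypothetical code, not from the kernel; the
folding shown assumes an optimizing build such as gcc -O2):

#include <stdio.h>

/*
 * Stand-in for check_object_size(): the runtime check can be skipped
 * only when, after inlining, the size is a compile-time constant.
 */
static __attribute__((always_inline)) inline
int needs_runtime_check(unsigned long n)
{
        return !__builtin_constant_p(n);
}

int main(int argc, char **argv)
{
        unsigned long runtime_n = (unsigned long)argc;

        (void)argv;
        /* Constant size: with -O2 this folds to 0 (check elided). */
        printf("constant size -> runtime check? %d\n",
               needs_runtime_check(128));
        /* Size known only at runtime: the check must run. */
        printf("runtime size  -> runtime check? %d\n",
               needs_runtime_check(runtime_n));
        return 0;
}

Built with gcc -O2 this prints 0 then 1; without forced inlining the first
answer can silently become 1 as well, which is the regression the
__always_inline guards against.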
mm/usercopy.c

@@ -134,31 +134,16 @@
         return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-                                            bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+                                          struct page *page, bool to_user)
 {
-        struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
         const void *end = ptr + n - 1;
+        struct page *endpage;
         bool is_reserved, is_cma;
 
         /*
-         * Some architectures (arm64) return true for virt_addr_valid() on
-         * vmalloced addresses. Work around this by checking for vmalloc
-         * first.
-         */
-        if (is_vmalloc_addr(ptr))
-                return NULL;
-
-        if (!virt_addr_valid(ptr))
-                return NULL;
-
-        page = virt_to_head_page(ptr);
-
-        /* Check slab allocator for flags and size. */
-        if (PageSlab(page))
-                return __check_heap_object(ptr, n, page);
-
-        /*
          * Sometimes the kernel data regions are not marked Reserved (see
          * check below). And sometimes [_sdata,_edata) does not cover
          * rodata and/or bss, so check each range explicitly.
@@ -186,7 +171,7 @@
             ((unsigned long)end & (unsigned long)PAGE_MASK)))
                 return NULL;
 
-        /* Allow if start and end are inside the same compound page. */
+        /* Allow if fully inside the same compound (__GFP_COMP) page. */
         endpage = virt_to_head_page(end);
         if (likely(endpage == page))
                 return NULL;
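For context on the compound-page test above: only allocations made with
__GFP_COMP mark their tail pages as belonging to a head page, which is what
lets virt_to_head_page() agree for every byte of the object. A hypothetical
demo module sketching the difference (module and symbol names are
illustrative, not part of this commit):

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int __init pagespan_demo_init(void)
{
        /* Two-page (order-1) allocations, with and without __GFP_COMP. */
        struct page *comp = alloc_pages(GFP_KERNEL | __GFP_COMP, 1);
        struct page *flat = alloc_pages(GFP_KERNEL, 1);

        if (comp) {
                void *p = page_address(comp);

                /* Compound: both bytes resolve to the same head page. */
                pr_info("compound heads match: %d\n",
                        virt_to_head_page(p) == virt_to_head_page(p + PAGE_SIZE));
                __free_pages(comp, 1);
        }
        if (flat) {
                void *p = page_address(flat);

                /* Non-compound: the second page is its own head, so a
                 * copy spanning both pages would be rejected with
                 * CONFIG_HARDENED_USERCOPY_PAGESPAN=y. */
                pr_info("flat heads match: %d\n",
                        virt_to_head_page(p) == virt_to_head_page(p + PAGE_SIZE));
                __free_pages(flat, 1);
        }
        return 0;
}

static void __exit pagespan_demo_exit(void)
{
}

module_init(pagespan_demo_init);
module_exit(pagespan_demo_exit);
MODULE_LICENSE("GPL");

On a kernel of this era the first message prints 1 and the second 0, which
is exactly the endpage == page distinction the next hunk walks past only
for Reserved and CMA ranges.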
@@ -199,20 +184,44 @@
         is_reserved = PageReserved(page);
         is_cma = is_migrate_cma_page(page);
         if (!is_reserved && !is_cma)
-                goto reject;
+                return "<spans multiple pages>";
 
         for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
                 page = virt_to_head_page(ptr);
                 if (is_reserved && !PageReserved(page))
-                        goto reject;
+                        return "<spans Reserved and non-Reserved pages>";
                 if (is_cma && !is_migrate_cma_page(page))
-                        goto reject;
+                        return "<spans CMA and non-CMA pages>";
         }
+#endif
 
         return NULL;
+}
 
-reject:
-        return "<spans multiple pages>";
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                            bool to_user)
+{
+        struct page *page;
+
+        /*
+         * Some architectures (arm64) return true for virt_addr_valid() on
+         * vmalloced addresses. Work around this by checking for vmalloc
+         * first.
+         */
+        if (is_vmalloc_addr(ptr))
+                return NULL;
+
+        if (!virt_addr_valid(ptr))
+                return NULL;
+
+        page = virt_to_head_page(ptr);
+
+        /* Check slab allocator for flags and size. */
+        if (PageSlab(page))
+                return __check_heap_object(ptr, n, page);
+
+        /* Verify object does not incorrectly span multiple pages. */
+        return check_page_span(ptr, n, page, to_user);
 }
 
 /*
security/Kconfig

@@ -147,6 +147,17 @@
           or are part of the kernel text. This kills entire classes
           of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+        bool "Refuse to copy allocations that span multiple pages"
+        depends on HARDENED_USERCOPY
+        depends on EXPERT
+        help
+          When a multi-page allocation is done without __GFP_COMP,
+          hardened usercopy will reject attempts to copy it. There are,
+          however, several cases of this in the kernel that have not all
+          been removed. This config is intended to be used only while
+          trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
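
Since the option depends on EXPERT, it never shows up in "normal"
configurations; to hunt for the remaining non-__GFP_COMP offenders you
would enable it explicitly, e.g. with a .config fragment like this
(illustrative; HARDENED_USERCOPY itself also needs arch support):

CONFIG_EXPERT=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_HARDENED_USERCOPY_PAGESPAN=y

Copies that trip the check are then reported with the "<spans multiple
pages>" style reasons visible in the mm/usercopy.c hunk above.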