Commit e13053f50664d3d614bbc9b8c83abdad979ac7c9
Exists in smarc-imx_3.14.28_1.0.0_ga and 1 other branch
Merge branch 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull voluntary preemption fixes from Ingo Molnar:
 "This tree contains a speedup which is achieved through better
  might_sleep()/might_fault() preemption point annotations for uaccess
  functions, by Michael S Tsirkin:

  1. The only reason uaccess routines might sleep is if they fault.
     Make this explicit for all architectures.

  2. A voluntary preemption point in uaccess functions means the compiler
     can't inline them efficiently; this breaks the assumption that they
     are very fast and small that e.g. net code seems to make. Remove
     this preemption point so behaviour matches what callers assume.

  3. Accesses (e.g. through socket ops) to kernel memory with KERNEL_DS,
     like net/sunrpc does, will never sleep. Remove an unconditional
     might_sleep() in the might_fault() inline in kernel.h (used when
     PROVE_LOCKING is not set).

  4. Accesses with pagefault_disable() return EFAULT but won't cause the
     caller to sleep. Check for that and thus avoid might_sleep() when
     PROVE_LOCKING is set.

  These changes offer a nice speedup for CONFIG_PREEMPT_VOLUNTARY=y
  kernels; here's a network bandwidth measurement between a virtual
  machine and the host:

   before:
    incoming: 7122.77 Mb/s
    outgoing: 8480.37 Mb/s

   after:
    incoming: 8619.24 Mb/s  [ +21.0% ]
    outgoing: 9455.42 Mb/s  [ +11.5% ]

  I kept these changes in a separate tree, separate from scheduler
  changes, because it's a mixed MM and scheduler topic"

* 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mm, sched: Allow uaccess in atomic with pagefault_disable()
  mm, sched: Drop voluntary schedule from might_fault()
  x86: uaccess s/might_sleep/might_fault/
  tile: uaccess s/might_sleep/might_fault/
  powerpc: uaccess s/might_sleep/might_fault/
  mn10300: uaccess s/might_sleep/might_fault/
  microblaze: uaccess s/might_sleep/might_fault/
  m32r: uaccess s/might_sleep/might_fault/
  frv: uaccess s/might_sleep/might_fault/
  arm64: uaccess s/might_sleep/might_fault/
  asm-generic: uaccess s/might_sleep/might_fault/
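Point 4 above is the pattern that callers such as the futex paths rely on: with page faults disabled, a uaccess call made from atomic context fails with -EFAULT instead of sleeping, and after this series might_fault() (in the PROVE_LOCKING/DEBUG_ATOMIC_SLEEP build) stays quiet because it sees in_atomic(). Below is a minimal sketch of that calling pattern, not code from this commit; the function name example_peek_user is made up for illustration, while pagefault_disable()/pagefault_enable() and get_user() are the real interfaces.

#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Illustration only: read a user-space word from a context that must not
 * sleep.  pagefault_disable() raises the preempt count, so in_atomic() is
 * true; if the page is not resident, get_user() returns -EFAULT instead of
 * taking a sleeping fault, and the reworked might_fault() no longer warns.
 */
static int example_peek_user(u32 *dst, u32 __user *src)
{
	int ret;

	pagefault_disable();
	ret = get_user(*dst, src);
	pagefault_enable();

	return ret;	/* 0 on success, -EFAULT otherwise */
}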
11 changed files:
- arch/arm64/include/asm/uaccess.h
- arch/frv/include/asm/uaccess.h
- arch/m32r/include/asm/uaccess.h
- arch/microblaze/include/asm/uaccess.h
- arch/mn10300/include/asm/uaccess.h
- arch/powerpc/include/asm/uaccess.h
- arch/tile/include/asm/uaccess.h
- arch/x86/include/asm/uaccess_64.h
- include/asm-generic/uaccess.h
- include/linux/kernel.h
- mm/memory.c
arch/arm64/include/asm/uaccess.h
@@ -166,7 +166,7 @@
 
 #define get_user(x, ptr) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \
 		__get_user((x), (ptr)) : \
 		((x) = 0, -EFAULT); \
@@ -227,7 +227,7 @@
 
 #define put_user(x, ptr) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
 		__put_user((x), (ptr)) : \
 		-EFAULT; \
arch/frv/include/asm/uaccess.h
@@ -280,14 +280,14 @@
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return __copy_to_user_inatomic(to, from, n);
 }
 
 static inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return __copy_from_user_inatomic(to, from, n);
 }
 
arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@
 ({ \
 	long __gu_err = 0; \
 	unsigned long __gu_val; \
-	might_sleep(); \
+	might_fault(); \
 	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
 	(x) = (__typeof__(*(ptr)))__gu_val; \
 	__gu_err; \
@@ -227,7 +227,7 @@
 	long __gu_err = -EFAULT; \
 	unsigned long __gu_val = 0; \
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
-	might_sleep(); \
+	might_fault(); \
 	if (access_ok(VERIFY_READ,__gu_addr,size)) \
 		__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
 	(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -295,7 +295,7 @@
 #define __put_user_nocheck(x,ptr,size) \
 ({ \
 	long __pu_err; \
-	might_sleep(); \
+	might_fault(); \
 	__put_user_size((x),(ptr),(size),__pu_err); \
 	__pu_err; \
 })
@@ -305,7 +305,7 @@
 ({ \
 	long __pu_err = -EFAULT; \
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	might_sleep(); \
+	might_fault(); \
 	if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
 		__put_user_size((x),__pu_addr,(size),__pu_err); \
 	__pu_err; \
@@ -597,7 +597,7 @@
  */
 #define copy_to_user(to,from,n) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	__generic_copy_to_user((to),(from),(n)); \
 })
 
@@ -638,7 +638,7 @@
  */
 #define copy_from_user(to,from,n) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	__generic_copy_from_user((to),(from),(n)); \
 })
 
arch/microblaze/include/asm/uaccess.h
@@ -145,7 +145,7 @@
 static inline unsigned long __must_check clear_user(void __user *to,
 		unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
 		return n;
 
@@ -371,7 +371,7 @@
 static inline long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_READ, from, n))
 		return __copy_from_user(to, from, n);
 	return n;
@@ -385,7 +385,7 @@
 static inline long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		return __copy_to_user(to, from, n);
 	return n;
arch/mn10300/include/asm/uaccess.h
@@ -471,13 +471,13 @@
 
 #define __copy_to_user(to, from, n) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	__copy_to_user_inatomic((to), (from), (n)); \
 })
 
 #define __copy_from_user(to, from, n) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	__copy_from_user_inatomic((to), (from), (n)); \
 })
 
arch/powerpc/include/asm/uaccess.h
@@ -178,7 +178,7 @@
 	long __pu_err; \
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
 	if (!is_kernel_addr((unsigned long)__pu_addr)) \
-		might_sleep(); \
+		might_fault(); \
 	__chk_user_ptr(ptr); \
 	__put_user_size((x), __pu_addr, (size), __pu_err); \
 	__pu_err; \
@@ -188,7 +188,7 @@
 ({ \
 	long __pu_err = -EFAULT; \
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	might_sleep(); \
+	might_fault(); \
 	if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
 		__put_user_size((x), __pu_addr, (size), __pu_err); \
 	__pu_err; \
@@ -268,7 +268,7 @@
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
 	__chk_user_ptr(ptr); \
 	if (!is_kernel_addr((unsigned long)__gu_addr)) \
-		might_sleep(); \
+		might_fault(); \
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
 	(x) = (__typeof__(*(ptr)))__gu_val; \
 	__gu_err; \
@@ -282,7 +282,7 @@
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
 	__chk_user_ptr(ptr); \
 	if (!is_kernel_addr((unsigned long)__gu_addr)) \
-		might_sleep(); \
+		might_fault(); \
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
 	(x) = (__typeof__(*(ptr)))__gu_val; \
 	__gu_err; \
@@ -294,7 +294,7 @@
 	long __gu_err = -EFAULT; \
 	unsigned long __gu_val = 0; \
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
-	might_sleep(); \
+	might_fault(); \
 	if (access_ok(VERIFY_READ, __gu_addr, (size))) \
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
 	(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -419,14 +419,14 @@
 static inline unsigned long __copy_from_user(void *to,
 		const void __user *from, unsigned long size)
 {
-	might_sleep();
+	might_fault();
 	return __copy_from_user_inatomic(to, from, size);
 }
 
 static inline unsigned long __copy_to_user(void __user *to,
 		const void *from, unsigned long size)
 {
-	might_sleep();
+	might_fault();
 	return __copy_to_user_inatomic(to, from, size);
 }
 
@@ -434,7 +434,7 @@
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
 {
-	might_sleep();
+	might_fault();
 	if (likely(access_ok(VERIFY_WRITE, addr, size)))
 		return __clear_user(addr, size);
 	if ((unsigned long)addr < TASK_SIZE) {
arch/tile/include/asm/uaccess.h
arch/x86/include/asm/uaccess_64.h
include/asm-generic/uaccess.h
@@ -163,7 +163,7 @@
 
 #define put_user(x, ptr) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
 		__put_user(x, ptr) : \
 		-EFAULT; \
@@ -225,7 +225,7 @@
 
 #define get_user(x, ptr) \
 ({ \
-	might_sleep(); \
+	might_fault(); \
 	access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
 		__get_user(x, ptr) : \
 		-EFAULT; \
@@ -255,7 +255,7 @@
 static inline long copy_from_user(void *to,
 		const void __user * from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_READ, from, n))
 		return __copy_from_user(to, from, n);
 	else
@@ -265,7 +265,7 @@
 static inline long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		return __copy_to_user(to, from, n);
 	else
@@ -336,7 +336,7 @@
 static inline __must_check unsigned long
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (!access_ok(VERIFY_WRITE, to, n))
 		return n;
 
include/linux/kernel.h
@@ -193,13 +193,10 @@
 	(__x < 0) ? -__x : __x; \
 })
 
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 void might_fault(void);
 #else
-static inline void might_fault(void)
-{
-	might_sleep();
-}
+static inline void might_fault(void) { }
 #endif
 
 extern struct atomic_notifier_head panic_notifier_list;
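This kernel.h hunk is what addresses point 3 of the merge message: with PROVE_LOCKING and DEBUG_ATOMIC_SLEEP both off, might_fault() becomes an empty stub instead of an unconditional might_sleep(), so KERNEL_DS accesses of the kind net/sunrpc performs no longer pay for (or falsely trigger) the sleep annotation. A rough sketch of that access pattern under the era's set_fs() interface follows; it is not code from this commit, and the function name example_copy_kernel_buf is hypothetical, while get_fs()/set_fs(), KERNEL_DS and copy_from_user() are the real interfaces of that kernel generation.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical sketch of the KERNEL_DS pattern referenced in point 3:
 * kernel memory pushed through the uaccess API.  Such an access never
 * faults (kernel memory is not paged out), so annotating it as a
 * potential sleep was a false positive.
 */
static int example_copy_kernel_buf(void *dst, const void *src, size_t len)
{
	mm_segment_t old_fs = get_fs();
	unsigned long not_copied;

	set_fs(KERNEL_DS);	/* uaccess now accepts kernel addresses */
	not_copied = copy_from_user(dst, (const void __user *)src, len);
	set_fs(old_fs);

	return not_copied ? -EFAULT : 0;
}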
mm/memory.c
@@ -4201,7 +4201,7 @@
 	up_read(&mm->mmap_sem);
 }
 
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 void might_fault(void)
 {
 	/*
@@ -4213,13 +4213,17 @@
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return;
 
-	might_sleep();
 	/*
 	 * it would be nicer only to annotate paths which are not under
 	 * pagefault_disable, however that requires a larger audit and
 	 * providing helpers like get_user_atomic.
 	 */
-	if (!in_atomic() && current->mm)
+	if (in_atomic())
+		return;
+
+	__might_sleep(__FILE__, __LINE__, 0);
+
+	if (current->mm)
 		might_lock_read(&current->mm->mmap_sem);
 }
 EXPORT_SYMBOL(might_fault);
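Putting the two mm/memory.c hunks together, the out-of-line might_fault() after this merge reads roughly as follows. This is reconstructed purely from the hunks above; the comment block at the top of the function is not part of the shown context and is elided.

void might_fault(void)
{
	/* ... (introductory comment elided; not shown in the hunks above) ... */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	/*
	 * it would be nicer only to annotate paths which are not under
	 * pagefault_disable, however that requires a larger audit and
	 * providing helpers like get_user_atomic.
	 */
	if (in_atomic())
		return;

	__might_sleep(__FILE__, __LINE__, 0);

	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);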