Commit 97f61e059bf498022b88fd800ed629018bacfcc2
Committed by
Greg Kroah-Hartman
1 parent
51528f7f9d
mutex: Avoid gcc version dependent __builtin_constant_p() usage
commit b0267507dfd0187fb7840a0ec461a510a7f041c5 upstream.

Commit 040a0a37 ("mutex: Add support for wound/wait style locks") used
"!__builtin_constant_p(p == NULL)" but gcc 3.x cannot handle such
expression correctly, leading to boot failure when built with
CONFIG_DEBUG_MUTEXES=y.

Fix it by explicitly passing a bool which tells whether p != NULL or not.

[ PeterZ: This is a sad patch, but provided it actually generates
  similar code I suppose its the best we can do bar whole sale
  deprecating gcc-3. ]

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: peterz@infradead.org
Cc: imirkin@alum.mit.edu
Cc: daniel.vetter@ffwll.ch
Cc: robdclark@gmail.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/201310171945.AGB17114.FSQVtHOJFOOFML@I-love.SAKURA.ne.jp
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 1 changed file with 16 additions and 16 deletions Side-by-side Diff
kernel/mutex.c
... | ... | @@ -408,7 +408,7 @@ |
408 | 408 | static __always_inline int __sched |
409 | 409 | __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, |
410 | 410 | struct lockdep_map *nest_lock, unsigned long ip, |
411 | - struct ww_acquire_ctx *ww_ctx) | |
411 | + struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) | |
412 | 412 | { |
413 | 413 | struct task_struct *task = current; |
414 | 414 | struct mutex_waiter waiter; |
... | ... | @@ -448,7 +448,7 @@ |
448 | 448 | struct task_struct *owner; |
449 | 449 | struct mspin_node node; |
450 | 450 | |
451 | - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { | |
451 | + if (use_ww_ctx && ww_ctx->acquired > 0) { | |
452 | 452 | struct ww_mutex *ww; |
453 | 453 | |
454 | 454 | ww = container_of(lock, struct ww_mutex, base); |
... | ... | @@ -478,7 +478,7 @@ |
478 | 478 | if ((atomic_read(&lock->count) == 1) && |
479 | 479 | (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { |
480 | 480 | lock_acquired(&lock->dep_map, ip); |
481 | - if (!__builtin_constant_p(ww_ctx == NULL)) { | |
481 | + if (use_ww_ctx) { | |
482 | 482 | struct ww_mutex *ww; |
483 | 483 | ww = container_of(lock, struct ww_mutex, base); |
484 | 484 | |
... | ... | @@ -548,7 +548,7 @@ |
548 | 548 | goto err; |
549 | 549 | } |
550 | 550 | |
551 | - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { | |
551 | + if (use_ww_ctx && ww_ctx->acquired > 0) { | |
552 | 552 | ret = __mutex_lock_check_stamp(lock, ww_ctx); |
553 | 553 | if (ret) |
554 | 554 | goto err; |
... | ... | @@ -568,7 +568,7 @@ |
568 | 568 | mutex_remove_waiter(lock, &waiter, current_thread_info()); |
569 | 569 | mutex_set_owner(lock); |
570 | 570 | |
571 | - if (!__builtin_constant_p(ww_ctx == NULL)) { | |
571 | + if (use_ww_ctx) { | |
572 | 572 | struct ww_mutex *ww = container_of(lock, |
573 | 573 | struct ww_mutex, |
574 | 574 | base); |
... | ... | @@ -618,7 +618,7 @@ |
618 | 618 | { |
619 | 619 | might_sleep(); |
620 | 620 | __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, |
621 | - subclass, NULL, _RET_IP_, NULL); | |
621 | + subclass, NULL, _RET_IP_, NULL, 0); | |
622 | 622 | } |
623 | 623 | |
624 | 624 | EXPORT_SYMBOL_GPL(mutex_lock_nested); |
... | ... | @@ -628,7 +628,7 @@ |
628 | 628 | { |
629 | 629 | might_sleep(); |
630 | 630 | __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, |
631 | - 0, nest, _RET_IP_, NULL); | |
631 | + 0, nest, _RET_IP_, NULL, 0); | |
632 | 632 | } |
633 | 633 | |
634 | 634 | EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); |
... | ... | @@ -638,7 +638,7 @@ |
638 | 638 | { |
639 | 639 | might_sleep(); |
640 | 640 | return __mutex_lock_common(lock, TASK_KILLABLE, |
641 | - subclass, NULL, _RET_IP_, NULL); | |
641 | + subclass, NULL, _RET_IP_, NULL, 0); | |
642 | 642 | } |
643 | 643 | EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); |
644 | 644 | |
... | ... | @@ -647,7 +647,7 @@ |
647 | 647 | { |
648 | 648 | might_sleep(); |
649 | 649 | return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, |
650 | - subclass, NULL, _RET_IP_, NULL); | |
650 | + subclass, NULL, _RET_IP_, NULL, 0); | |
651 | 651 | } |
652 | 652 | |
653 | 653 | EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); |
... | ... | @@ -685,7 +685,7 @@ |
685 | 685 | |
686 | 686 | might_sleep(); |
687 | 687 | ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, |
688 | - 0, &ctx->dep_map, _RET_IP_, ctx); | |
688 | + 0, &ctx->dep_map, _RET_IP_, ctx, 1); | |
689 | 689 | if (!ret && ctx->acquired > 1) |
690 | 690 | return ww_mutex_deadlock_injection(lock, ctx); |
691 | 691 | |
... | ... | @@ -700,7 +700,7 @@ |
700 | 700 | |
701 | 701 | might_sleep(); |
702 | 702 | ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, |
703 | - 0, &ctx->dep_map, _RET_IP_, ctx); | |
703 | + 0, &ctx->dep_map, _RET_IP_, ctx, 1); | |
704 | 704 | |
705 | 705 | if (!ret && ctx->acquired > 1) |
706 | 706 | return ww_mutex_deadlock_injection(lock, ctx); |
707 | 707 | |
708 | 708 | |
709 | 709 | |
... | ... | @@ -812,28 +812,28 @@ |
812 | 812 | struct mutex *lock = container_of(lock_count, struct mutex, count); |
813 | 813 | |
814 | 814 | __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, |
815 | - NULL, _RET_IP_, NULL); | |
815 | + NULL, _RET_IP_, NULL, 0); | |
816 | 816 | } |
817 | 817 | |
818 | 818 | static noinline int __sched |
819 | 819 | __mutex_lock_killable_slowpath(struct mutex *lock) |
820 | 820 | { |
821 | 821 | return __mutex_lock_common(lock, TASK_KILLABLE, 0, |
822 | - NULL, _RET_IP_, NULL); | |
822 | + NULL, _RET_IP_, NULL, 0); | |
823 | 823 | } |
824 | 824 | |
825 | 825 | static noinline int __sched |
826 | 826 | __mutex_lock_interruptible_slowpath(struct mutex *lock) |
827 | 827 | { |
828 | 828 | return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, |
829 | - NULL, _RET_IP_, NULL); | |
829 | + NULL, _RET_IP_, NULL, 0); | |
830 | 830 | } |
831 | 831 | |
832 | 832 | static noinline int __sched |
833 | 833 | __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
834 | 834 | { |
835 | 835 | return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, |
836 | - NULL, _RET_IP_, ctx); | |
836 | + NULL, _RET_IP_, ctx, 1); | |
837 | 837 | } |
838 | 838 | |
839 | 839 | static noinline int __sched |
... | ... | @@ -841,7 +841,7 @@ |
841 | 841 | struct ww_acquire_ctx *ctx) |
842 | 842 | { |
843 | 843 | return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, |
844 | - NULL, _RET_IP_, ctx); | |
844 | + NULL, _RET_IP_, ctx, 1); | |
845 | 845 | } |
846 | 846 | |
847 | 847 | #endif |