Commit d209d74d52ab39dc071656533cac095294f70de7
1 parent
1d61548254
Exists in
master
and in
4 other branches
rtmutex: Convert rtmutex.lock to raw_spinlock [subject typo "rtmutes" in the original commit corrected here]
Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org> Acked-by: Ingo Molnar <mingo@elte.hu>
Showing 3 changed files with 30 additions and 30 deletions Side-by-side Diff
include/linux/rtmutex.h
... | ... | @@ -24,7 +24,7 @@ |
24 | 24 | * @owner: the mutex owner |
25 | 25 | */ |
26 | 26 | struct rt_mutex { |
27 | - spinlock_t wait_lock; | |
27 | + raw_spinlock_t wait_lock; | |
28 | 28 | struct plist_head wait_list; |
29 | 29 | struct task_struct *owner; |
30 | 30 | #ifdef CONFIG_DEBUG_RT_MUTEXES |
... | ... | @@ -63,8 +63,8 @@ |
63 | 63 | #endif |
64 | 64 | |
65 | 65 | #define __RT_MUTEX_INITIALIZER(mutexname) \ |
66 | - { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ | |
67 | - , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ | |
66 | + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ | |
67 | + , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \ | |
68 | 68 | , .owner = NULL \ |
69 | 69 | __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} |
70 | 70 |
kernel/futex.c
... | ... | @@ -760,7 +760,7 @@ |
760 | 760 | if (!pi_state) |
761 | 761 | return -EINVAL; |
762 | 762 | |
763 | - spin_lock(&pi_state->pi_mutex.wait_lock); | |
763 | + raw_spin_lock(&pi_state->pi_mutex.wait_lock); | |
764 | 764 | new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); |
765 | 765 | |
766 | 766 | /* |
... | ... | @@ -789,7 +789,7 @@ |
789 | 789 | else if (curval != uval) |
790 | 790 | ret = -EINVAL; |
791 | 791 | if (ret) { |
792 | - spin_unlock(&pi_state->pi_mutex.wait_lock); | |
792 | + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); | |
793 | 793 | return ret; |
794 | 794 | } |
795 | 795 | } |
... | ... | @@ -805,7 +805,7 @@ |
805 | 805 | pi_state->owner = new_owner; |
806 | 806 | raw_spin_unlock_irq(&new_owner->pi_lock); |
807 | 807 | |
808 | - spin_unlock(&pi_state->pi_mutex.wait_lock); | |
808 | + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); | |
809 | 809 | rt_mutex_unlock(&pi_state->pi_mutex); |
810 | 810 | |
811 | 811 | return 0; |
kernel/rtmutex.c
... | ... | @@ -231,7 +231,7 @@ |
231 | 231 | goto out_unlock_pi; |
232 | 232 | |
233 | 233 | lock = waiter->lock; |
234 | - if (!spin_trylock(&lock->wait_lock)) { | |
234 | + if (!raw_spin_trylock(&lock->wait_lock)) { | |
235 | 235 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
236 | 236 | cpu_relax(); |
237 | 237 | goto retry; |
... | ... | @@ -240,7 +240,7 @@ |
240 | 240 | /* Deadlock detection */ |
241 | 241 | if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { |
242 | 242 | debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); |
243 | - spin_unlock(&lock->wait_lock); | |
243 | + raw_spin_unlock(&lock->wait_lock); | |
244 | 244 | ret = deadlock_detect ? -EDEADLK : 0; |
245 | 245 | goto out_unlock_pi; |
246 | 246 | } |
... | ... | @@ -280,7 +280,7 @@ |
280 | 280 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
281 | 281 | |
282 | 282 | top_waiter = rt_mutex_top_waiter(lock); |
283 | - spin_unlock(&lock->wait_lock); | |
283 | + raw_spin_unlock(&lock->wait_lock); | |
284 | 284 | |
285 | 285 | if (!detect_deadlock && waiter != top_waiter) |
286 | 286 | goto out_put_task; |
287 | 287 | |
... | ... | @@ -459,12 +459,12 @@ |
459 | 459 | */ |
460 | 460 | get_task_struct(owner); |
461 | 461 | |
462 | - spin_unlock(&lock->wait_lock); | |
462 | + raw_spin_unlock(&lock->wait_lock); | |
463 | 463 | |
464 | 464 | res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, |
465 | 465 | task); |
466 | 466 | |
467 | - spin_lock(&lock->wait_lock); | |
467 | + raw_spin_lock(&lock->wait_lock); | |
468 | 468 | |
469 | 469 | return res; |
470 | 470 | } |
471 | 471 | |
... | ... | @@ -575,11 +575,11 @@ |
575 | 575 | /* gets dropped in rt_mutex_adjust_prio_chain()! */ |
576 | 576 | get_task_struct(owner); |
577 | 577 | |
578 | - spin_unlock(&lock->wait_lock); | |
578 | + raw_spin_unlock(&lock->wait_lock); | |
579 | 579 | |
580 | 580 | rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); |
581 | 581 | |
582 | - spin_lock(&lock->wait_lock); | |
582 | + raw_spin_lock(&lock->wait_lock); | |
583 | 583 | } |
584 | 584 | |
585 | 585 | /* |
586 | 586 | |
... | ... | @@ -672,14 +672,14 @@ |
672 | 672 | break; |
673 | 673 | } |
674 | 674 | |
675 | - spin_unlock(&lock->wait_lock); | |
675 | + raw_spin_unlock(&lock->wait_lock); | |
676 | 676 | |
677 | 677 | debug_rt_mutex_print_deadlock(waiter); |
678 | 678 | |
679 | 679 | if (waiter->task) |
680 | 680 | schedule_rt_mutex(lock); |
681 | 681 | |
682 | - spin_lock(&lock->wait_lock); | |
682 | + raw_spin_lock(&lock->wait_lock); | |
683 | 683 | set_current_state(state); |
684 | 684 | } |
685 | 685 | |
686 | 686 | |
... | ... | @@ -700,11 +700,11 @@ |
700 | 700 | debug_rt_mutex_init_waiter(&waiter); |
701 | 701 | waiter.task = NULL; |
702 | 702 | |
703 | - spin_lock(&lock->wait_lock); | |
703 | + raw_spin_lock(&lock->wait_lock); | |
704 | 704 | |
705 | 705 | /* Try to acquire the lock again: */ |
706 | 706 | if (try_to_take_rt_mutex(lock)) { |
707 | - spin_unlock(&lock->wait_lock); | |
707 | + raw_spin_unlock(&lock->wait_lock); | |
708 | 708 | return 0; |
709 | 709 | } |
710 | 710 | |
... | ... | @@ -731,7 +731,7 @@ |
731 | 731 | */ |
732 | 732 | fixup_rt_mutex_waiters(lock); |
733 | 733 | |
734 | - spin_unlock(&lock->wait_lock); | |
734 | + raw_spin_unlock(&lock->wait_lock); | |
735 | 735 | |
736 | 736 | /* Remove pending timer: */ |
737 | 737 | if (unlikely(timeout)) |
... | ... | @@ -758,7 +758,7 @@ |
758 | 758 | { |
759 | 759 | int ret = 0; |
760 | 760 | |
761 | - spin_lock(&lock->wait_lock); | |
761 | + raw_spin_lock(&lock->wait_lock); | |
762 | 762 | |
763 | 763 | if (likely(rt_mutex_owner(lock) != current)) { |
764 | 764 | |
... | ... | @@ -770,7 +770,7 @@ |
770 | 770 | fixup_rt_mutex_waiters(lock); |
771 | 771 | } |
772 | 772 | |
773 | - spin_unlock(&lock->wait_lock); | |
773 | + raw_spin_unlock(&lock->wait_lock); | |
774 | 774 | |
775 | 775 | return ret; |
776 | 776 | } |
... | ... | @@ -781,7 +781,7 @@ |
781 | 781 | static void __sched |
782 | 782 | rt_mutex_slowunlock(struct rt_mutex *lock) |
783 | 783 | { |
784 | - spin_lock(&lock->wait_lock); | |
784 | + raw_spin_lock(&lock->wait_lock); | |
785 | 785 | |
786 | 786 | debug_rt_mutex_unlock(lock); |
787 | 787 | |
788 | 788 | |
... | ... | @@ -789,13 +789,13 @@ |
789 | 789 | |
790 | 790 | if (!rt_mutex_has_waiters(lock)) { |
791 | 791 | lock->owner = NULL; |
792 | - spin_unlock(&lock->wait_lock); | |
792 | + raw_spin_unlock(&lock->wait_lock); | |
793 | 793 | return; |
794 | 794 | } |
795 | 795 | |
796 | 796 | wakeup_next_waiter(lock); |
797 | 797 | |
798 | - spin_unlock(&lock->wait_lock); | |
798 | + raw_spin_unlock(&lock->wait_lock); | |
799 | 799 | |
800 | 800 | /* Undo pi boosting if necessary: */ |
801 | 801 | rt_mutex_adjust_prio(current); |
... | ... | @@ -970,8 +970,8 @@ |
970 | 970 | void __rt_mutex_init(struct rt_mutex *lock, const char *name) |
971 | 971 | { |
972 | 972 | lock->owner = NULL; |
973 | - spin_lock_init(&lock->wait_lock); | |
974 | - plist_head_init(&lock->wait_list, &lock->wait_lock); | |
973 | + raw_spin_lock_init(&lock->wait_lock); | |
974 | + plist_head_init_raw(&lock->wait_list, &lock->wait_lock); | |
975 | 975 | |
976 | 976 | debug_rt_mutex_init(lock, name); |
977 | 977 | } |
... | ... | @@ -1032,7 +1032,7 @@ |
1032 | 1032 | { |
1033 | 1033 | int ret; |
1034 | 1034 | |
1035 | - spin_lock(&lock->wait_lock); | |
1035 | + raw_spin_lock(&lock->wait_lock); | |
1036 | 1036 | |
1037 | 1037 | mark_rt_mutex_waiters(lock); |
1038 | 1038 | |
... | ... | @@ -1040,7 +1040,7 @@ |
1040 | 1040 | /* We got the lock for task. */ |
1041 | 1041 | debug_rt_mutex_lock(lock); |
1042 | 1042 | rt_mutex_set_owner(lock, task, 0); |
1043 | - spin_unlock(&lock->wait_lock); | |
1043 | + raw_spin_unlock(&lock->wait_lock); | |
1044 | 1044 | rt_mutex_deadlock_account_lock(lock, task); |
1045 | 1045 | return 1; |
1046 | 1046 | } |
... | ... | @@ -1056,7 +1056,7 @@ |
1056 | 1056 | */ |
1057 | 1057 | ret = 0; |
1058 | 1058 | } |
1059 | - spin_unlock(&lock->wait_lock); | |
1059 | + raw_spin_unlock(&lock->wait_lock); | |
1060 | 1060 | |
1061 | 1061 | debug_rt_mutex_print_deadlock(waiter); |
1062 | 1062 | |
... | ... | @@ -1106,7 +1106,7 @@ |
1106 | 1106 | { |
1107 | 1107 | int ret; |
1108 | 1108 | |
1109 | - spin_lock(&lock->wait_lock); | |
1109 | + raw_spin_lock(&lock->wait_lock); | |
1110 | 1110 | |
1111 | 1111 | set_current_state(TASK_INTERRUPTIBLE); |
1112 | 1112 | |
... | ... | @@ -1124,7 +1124,7 @@ |
1124 | 1124 | */ |
1125 | 1125 | fixup_rt_mutex_waiters(lock); |
1126 | 1126 | |
1127 | - spin_unlock(&lock->wait_lock); | |
1127 | + raw_spin_unlock(&lock->wait_lock); | |
1128 | 1128 | |
1129 | 1129 | /* |
1130 | 1130 | * Readjust priority, when we did not get the lock. We might have been |