Commit cba6d0d64ee53772b285d0c0c288deefbeaf7775
Parent: 6887a4131d
Revert "rcu: Move PREEMPT_RCU preemption to switch_to() invocation"
This reverts commit 616c310e83b872024271c915c1b9ab505b9efad9
("Move PREEMPT_RCU preemption to switch_to() invocation").

Testing by Sasha Levin <levinsasha928@gmail.com> showed that this can
result in deadlock due to invoking the scheduler when one of the
runqueue locks is held. Because this commit was simply a performance
optimization, revert it.

Reported-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Sasha Levin <levinsasha928@gmail.com>
Showing 8 changed files with 19 additions and 16 deletions
arch/um/drivers/mconsole_kern.c
include/linux/rcupdate.h
@@ -184,7 +184,6 @@
 /* Internal to kernel */
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
-extern void rcu_preempt_note_context_switch(void);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 extern void rcu_idle_enter(void);
include/linux/rcutiny.h
@@ -87,6 +87,10 @@
 
 #ifdef CONFIG_TINY_RCU
 
+static inline void rcu_preempt_note_context_switch(void)
+{
+}
+
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
         *delta_jiffies = ULONG_MAX;
@@ -95,6 +99,7 @@
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
+void rcu_preempt_note_context_switch(void);
 int rcu_preempt_needs_cpu(void);
 
 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
@@ -108,6 +113,7 @@
 static inline void rcu_note_context_switch(int cpu)
 {
         rcu_sched_qs(cpu);
+        rcu_preempt_note_context_switch();
 }
 
 /*
include/linux/sched.h
@@ -1871,19 +1871,9 @@
         INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-        if (prev->rcu_read_lock_nesting != 0)
-                rcu_preempt_note_context_switch();
-}
-
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
-{
-}
-
-static inline void rcu_switch_from(struct task_struct *prev)
 {
 }
 
kernel/rcutree.c
kernel/rcutree.h
@@ -444,6 +444,7 @@
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@
  *
  * Caller must disable preemption.
  */
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
 {
         struct task_struct *t = current;
         unsigned long flags;
@@ -164,7 +164,7 @@
             (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                 /* Possibly blocking in an RCU read-side critical section. */
-                rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                 rnp = rdp->mynode;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@
          * means that we continue to block the current grace period.
          */
         local_irq_save(flags);
-        rcu_preempt_qs(smp_processor_id());
+        rcu_preempt_qs(cpu);
         local_irq_restore(flags);
 }
 
@@ -1000,6 +1000,14 @@
         rcu_sched_force_quiescent_state();
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
 
 /*
  * Because preemptible RCU does not exist, there are never any preempted