Commit 897f0b3c3ff40b443c84e271bef19bd6ae885195
Committed by: Ingo Molnar
1 parent: 25c2d55c00
Exists in: master and 4 other branches
sched: Kill the broken and deadlockable cpuset_lock/cpuset_cpus_allowed_locked code
This patch just states the fact that the cpusets/cpuhotplug interaction is broken and removes the deadlockable code which only pretends to work.

- cpuset_lock() doesn't really work. It is needed for cpuset_cpus_allowed_locked(), but we can't take this lock in the try_to_wake_up()->select_fallback_rq() path.

- cpuset_lock() is deadlockable. Suppose that a task T bound to CPU takes callback_mutex. If cpu_down(CPU) happens before T drops callback_mutex, stop_machine() preempts T, then migration_call(CPU_DEAD) tries to take cpuset_lock() and hangs forever, because CPU is already dead and thus T can't be scheduled.

- cpuset_cpus_allowed_locked() is deadlockable too. It takes task_lock(), which is not irq-safe, but try_to_wake_up() can be called from irq context.

Kill them, and change select_fallback_rq() to use cpu_possible_mask, like we currently do without CONFIG_CPUSETS.

Also, with or without this patch, with or without CONFIG_CPUSETS, the callers of select_fallback_rq() can race with each other or with set_cpus_allowed() paths. The subsequent patches try to fix these problems.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100315091003.GA9123@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
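The core of the change in select_fallback_rq() is easiest to see in isolation: when the task's allowed mask no longer intersects any active CPU, the old code consulted the task's cpuset under callback_mutex, while the new code simply widens the mask to cpu_possible_mask and picks any active CPU, so no cpuset locking is needed on the wakeup path. Below is a minimal userspace model of that fallback logic, not kernel code; the cpumask_t typedef, NR_CPUS value, and pick_fallback_cpu()/first_set() helpers are illustrative assumptions, not kernel API.

#include <stdio.h>

typedef unsigned long cpumask_t;          /* model: one bit per CPU */

#define NR_CPUS 8

static int first_set(cpumask_t m)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (m & (1UL << cpu))
                        return cpu;
        return NR_CPUS;                   /* plays the role of nr_cpu_ids: nothing found */
}

/*
 * Model of the new fallback: if the task's allowed mask has no active
 * CPU, forget cpusets entirely, reset the mask to all possible CPUs
 * and take any active one.  No callback_mutex, no task_lock() needed.
 */
static int pick_fallback_cpu(cpumask_t *allowed, cpumask_t active, cpumask_t possible)
{
        int cpu = first_set(*allowed & active);

        if (cpu >= NR_CPUS) {             /* "No more Mr. Nice Guy." */
                *allowed = possible;
                cpu = first_set(active);
        }
        return cpu;
}

int main(void)
{
        cpumask_t possible = 0xff;        /* CPUs 0-7 exist */
        cpumask_t active   = 0xfe;        /* CPU 0 has been hot-unplugged */
        cpumask_t allowed  = 0x01;        /* task was bound to CPU 0 only */

        printf("fallback cpu = %d\n", pick_fallback_cpu(&allowed, active, possible));
        return 0;
}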
Showing 3 changed files with 4 additions and 46 deletions
include/linux/cpuset.h
@@ -21,8 +21,6 @@
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-                                       struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +67,6 @@
 extern void cpuset_task_status_allowed(struct seq_file *m,
                                        struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,11 +100,6 @@
 {
         cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-                                              struct cpumask *mask)
-{
-        cpumask_copy(mask, cpu_possible_mask);
-}
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 {
@@ -156,9 +146,6 @@
                                               struct task_struct *task)
 {
 }
-
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
 
 static inline int cpuset_mem_spread_node(void)
 {
kernel/cpuset.c
@@ -2182,19 +2182,10 @@
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
         mutex_lock(&callback_mutex);
-        cpuset_cpus_allowed_locked(tsk, pmask);
-        mutex_unlock(&callback_mutex);
-}
-
-/**
- * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
- **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
-{
         task_lock(tsk);
         guarantee_online_cpus(task_cs(tsk), pmask);
         task_unlock(tsk);
+        mutex_unlock(&callback_mutex);
 }
 
 void cpuset_init_current_mems_allowed(void)
@@ -2380,22 +2371,6 @@
         if (unlikely(test_thread_flag(TIF_MEMDIE)))
                 return 1;
         return 0;
-}
-
-/**
- * cpuset_lock - lock out any changes to cpuset structures
- *
- * The out of memory (oom) code needs to mutex_lock cpusets
- * from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset. Expose callback_mutex via this
- * cpuset_lock() routine, so the oom code can lock it, before
- * locking the task list. The tasklist_lock is a spinlock, so
- * must be taken inside callback_mutex.
- */
-
-void cpuset_lock(void)
-{
-        mutex_lock(&callback_mutex);
 }
 
 /**
kernel/sched.c
@@ -2296,11 +2296,9 @@
                 return dest_cpu;
 
         /* No more Mr. Nice Guy. */
-        if (dest_cpu >= nr_cpu_ids) {
-                rcu_read_lock();
-                cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-                rcu_read_unlock();
-                dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+        if (unlikely(dest_cpu >= nr_cpu_ids)) {
+                cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+                dest_cpu = cpumask_any(cpu_active_mask);
 
                 /*
                  * Don't tell them about moving exiting tasks or
@@ -5866,7 +5864,6 @@
 
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
                 migrate_live_tasks(cpu);
                 rq = cpu_rq(cpu);
                 kthread_stop(rq->migration_thread);
@@ -5879,7 +5876,6 @@
                 rq->idle->sched_class = &idle_sched_class;
                 migrate_dead_tasks(cpu);
                 raw_spin_unlock_irq(&rq->lock);
-                cpuset_unlock();
                 migrate_nr_uninterruptible(rq);
                 BUG_ON(rq->nr_running != 0);
                 calc_global_load_remove(rq);