Commit 7054a071ff7448ab1ca764a5b67f1d16fd981a50

Authored by Peter Zijlstra
Committed by Greg Kroah-Hartman
1 parent 7be87249f8

stop_machine: Fix^2 race between stop_two_cpus() and stop_cpus()

commit 177c53d943368fc97644ebc0a250dc8e2d124250 upstream.

We must use smp_call_function_single(.wait=1) for the
irq_cpu_stop_queue_work() call to ensure the queueing is actually done
under stop_cpus_lock. Without this, the lock could already have been
dropped by the time the queueing happens, reopening the race the
earlier fix tried to close.
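
The change (shown in full in the diff below) flips the final "wait" argument of
smp_call_function_single() from 0 to 1. As an editorial illustration (the
comments below are annotations, not part of the commit), this is the window
being closed:

	lg_local_lock(&stop_cpus_lock);
	/*
	 * wait == 0: the cross-CPU call is merely posted; this CPU can
	 * fall through, drop stop_cpus_lock below, and race with a
	 * concurrent stop_cpus() before irq_cpu_stop_queue_work() has
	 * actually run on min(cpu1, cpu2).
	 *
	 * wait == 1: we block until irq_cpu_stop_queue_work() has
	 * completed on the target CPU, so both works are guaranteed to
	 * be queued while stop_cpus_lock is still held.
	 */
	smp_call_function_single(min(cpu1, cpu2),
				 &irq_cpu_stop_queue_work,
				 &call_args, 1);
	lg_local_unlock(&stop_cpus_lock);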

Fixes: 7053ea1a34fa ("stop_machine: Fix race between stop_two_cpus() and stop_cpus()")

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140228123905.GK3104@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 1 changed file with 1 addition and 1 deletion

kernel/stop_machine.c
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);

	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(p);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

struct irq_cpu_stop_queue_work_info {
	int			cpu1;
	int			cpu2;
	struct cpu_stop_work	*work1;
	struct cpu_stop_work	*work2;
};

/*
 * This function is always run with irqs and preemption disabled.
 * This guarantees that both work1 and work2 get queued, before
 * our local migrate thread gets the chance to preempt us.
 */
static void irq_cpu_stop_queue_work(void *arg)
{
	struct irq_cpu_stop_queue_work_info *info = arg;
	cpu_stop_queue_work(info->cpu1, info->work1);
	cpu_stop_queue_work(info->cpu2, info->work2);
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct irq_cpu_stop_queue_work_info call_args;
	struct multi_stop_data msdata;

	preempt_disable();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	call_args = (struct irq_cpu_stop_queue_work_info){
		.cpu1 = cpu1,
		.cpu2 = cpu2,
		.work1 = &work1,
		.work2 = &work2,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * If we observe both CPUs active we know _cpu_down() cannot yet have
	 * queued its stop_machine works and therefore ours will get executed
	 * first. Or its not either one of our CPUs that's getting unplugged,
	 * in which case we don't care.
	 *
	 * This relies on the stopper workqueues to be FIFO.
	 */
	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
		preempt_enable();
		return -ENOENT;
	}

	lg_local_lock(&stop_cpus_lock);
	/*
	 * Queuing needs to be done by the lowest numbered CPU, to ensure
	 * that works are always queued in the same order on every CPU.
	 * This prevents deadlocks.
	 */
	smp_call_function_single(min(cpu1, cpu2),
				 &irq_cpu_stop_queue_work,
-				 &call_args, 0);
+				 &call_args, 1);
	lg_local_unlock(&stop_cpus_lock);
	preempt_enable();

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
	}

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
	lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	unsigned long flags;

	/* drain remaining works */
	spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry(work, &stopper->works, list)
		cpu_stop_signal_done(work->done, false);
	stopper->enabled = false;
	spin_unlock_irqrestore(&stopper->lock, flags);
}

static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	spin_unlock_irq(&stopper->lock);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper_task,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

#endif	/* CONFIG_STOP_MACHINE */

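For context, a hedged sketch of how a caller uses stop_two_cpus(); the
callback name and argument type below are hypothetical, not part of this
commit (the in-tree user at the time was the scheduler's migrate_swap()
path):

	/*
	 * Hypothetical callback: runs on one CPU while both CPUs are
	 * spinning in multi_cpu_stop() with interrupts disabled, so
	 * nothing else can touch the shared state being swapped.
	 */
	static int pair_swap_stop(void *arg)
	{
		struct pair_swap_args *args = arg;	/* hypothetical type */

		swap(args->a->val, args->b->val);
		return 0;
	}

	...
	err = stop_two_cpus(cpu_a, cpu_b, pair_swap_stop, &args);
	/* -ENOENT if either CPU was going offline; otherwise the
	 * callback's return value. */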