Commit 8b28499a71d3431c9128abc743e2d2bfbdae3ed4
Committed by
Jens Axboe
1 parent
08eed44c72
Exists in
master
and in
13 other branches
smp: Consolidate the various smp_call_function_single() variants
__smp_call_function_single() and smp_call_function_single() share some code that can be factorized: execute inline when the target is local, check if the target is online, lock the csd, call generic_exec_single(). Let's move the common parts to generic_exec_single(). Reviewed-by: Jan Kara <jack@suse.cz> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Christoph Hellwig <hch@infradead.org> Cc: Ingo Molnar <mingo@kernel.org> Cc: Jan Kara <jack@suse.cz> Cc: Jens Axboe <axboe@fb.com> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Showing 1 changed file with 39 additions and 41 deletions Side-by-side Diff
kernel/smp.c
... | ... | @@ -117,13 +117,43 @@ |
117 | 117 | csd->flags &= ~CSD_FLAG_LOCK; |
118 | 118 | } |
119 | 119 | |
120 | +static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | |
121 | + | |
120 | 122 | /* |
121 | 123 | * Insert a previously allocated call_single_data element |
122 | 124 | * for execution on the given CPU. data must already have |
123 | 125 | * ->func, ->info, and ->flags set. |
124 | 126 | */ |
125 | -static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) | |
127 | +static int generic_exec_single(int cpu, struct call_single_data *csd, | |
128 | + smp_call_func_t func, void *info, int wait) | |
126 | 129 | { |
130 | + struct call_single_data csd_stack = { .flags = 0 }; | |
131 | + unsigned long flags; | |
132 | + | |
133 | + | |
134 | + if (cpu == smp_processor_id()) { | |
135 | + local_irq_save(flags); | |
136 | + func(info); | |
137 | + local_irq_restore(flags); | |
138 | + return 0; | |
139 | + } | |
140 | + | |
141 | + | |
142 | + if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) | |
143 | + return -ENXIO; | |
144 | + | |
145 | + | |
146 | + if (!csd) { | |
147 | + csd = &csd_stack; | |
148 | + if (!wait) | |
149 | + csd = &__get_cpu_var(csd_data); | |
150 | + } | |
151 | + | |
152 | + csd_lock(csd); | |
153 | + | |
154 | + csd->func = func; | |
155 | + csd->info = info; | |
156 | + | |
127 | 157 | if (wait) |
128 | 158 | csd->flags |= CSD_FLAG_WAIT; |
129 | 159 | |
... | ... | @@ -143,6 +173,8 @@ |
143 | 173 | |
144 | 174 | if (wait) |
145 | 175 | csd_lock_wait(csd); |
176 | + | |
177 | + return 0; | |
146 | 178 | } |
147 | 179 | |
148 | 180 | /* |
... | ... | @@ -168,8 +200,6 @@ |
168 | 200 | } |
169 | 201 | } |
170 | 202 | |
171 | -static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data); | |
172 | - | |
173 | 203 | /* |
174 | 204 | * smp_call_function_single - Run a function on a specific CPU |
175 | 205 | * @func: The function to run. This must be fast and non-blocking. |
176 | 206 | |
... | ... | @@ -181,12 +211,8 @@ |
181 | 211 | int smp_call_function_single(int cpu, smp_call_func_t func, void *info, |
182 | 212 | int wait) |
183 | 213 | { |
184 | - struct call_single_data d = { | |
185 | - .flags = 0, | |
186 | - }; | |
187 | - unsigned long flags; | |
188 | 214 | int this_cpu; |
189 | - int err = 0; | |
215 | + int err; | |
190 | 216 | |
191 | 217 | /* |
192 | 218 | * prevent preemption and reschedule on another processor, |
193 | 219 | |
... | ... | @@ -203,27 +229,8 @@ |
203 | 229 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
204 | 230 | && !oops_in_progress); |
205 | 231 | |
206 | - if (cpu == this_cpu) { | |
207 | - local_irq_save(flags); | |
208 | - func(info); | |
209 | - local_irq_restore(flags); | |
210 | - } else { | |
211 | - if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { | |
212 | - struct call_single_data *csd = &d; | |
232 | + err = generic_exec_single(cpu, NULL, func, info, wait); | |
213 | 233 | |
214 | - if (!wait) | |
215 | - csd = &__get_cpu_var(csd_data); | |
216 | - | |
217 | - csd_lock(csd); | |
218 | - | |
219 | - csd->func = func; | |
220 | - csd->info = info; | |
221 | - generic_exec_single(cpu, csd, wait); | |
222 | - } else { | |
223 | - err = -ENXIO; /* CPU not online */ | |
224 | - } | |
225 | - } | |
226 | - | |
227 | 234 | put_cpu(); |
228 | 235 | |
229 | 236 | return err; |
230 | 237 | |
... | ... | @@ -285,9 +292,8 @@ |
285 | 292 | */ |
286 | 293 | int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait) |
287 | 294 | { |
288 | - unsigned int this_cpu; | |
289 | - unsigned long flags; | |
290 | 295 | int err = 0; |
296 | + int this_cpu; | |
291 | 297 | |
292 | 298 | this_cpu = get_cpu(); |
293 | 299 | /* |
294 | 300 | |
295 | 301 | |
... | ... | @@ -296,20 +302,12 @@ |
296 | 302 | * send smp call function interrupt to this cpu and as such deadlocks |
297 | 303 | * can't happen. |
298 | 304 | */ |
299 | - WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | |
305 | + WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled() | |
300 | 306 | && !oops_in_progress); |
301 | 307 | |
302 | - if (cpu == this_cpu) { | |
303 | - local_irq_save(flags); | |
304 | - csd->func(csd->info); | |
305 | - local_irq_restore(flags); | |
306 | - } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { | |
307 | - csd_lock(csd); | |
308 | - generic_exec_single(cpu, csd, wait); | |
309 | - } else { | |
310 | - err = -ENXIO; /* CPU not online */ | |
311 | - } | |
308 | + err = generic_exec_single(cpu, csd, csd->func, csd->info, wait); | |
312 | 309 | put_cpu(); |
310 | + | |
313 | 311 | return err; |
314 | 312 | } |
315 | 313 | EXPORT_SYMBOL_GPL(__smp_call_function_single); |