Commit 3b16cf874861436725c43ba0b68bdd799297be7c
1 parent: 3d44223327
Exists in master and in 7 other branches
x86: convert to generic helpers for IPI function calls
This converts x86, x86-64, and xen to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single().

Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
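The division of labour after this change, condensed from the hunks below: the architecture now only supplies the two IPI-send hooks in smp_ops and acks the vector on the receiving CPU, while queueing of func/info, the actual invocation, and the optional wait move into the generic helpers in kernel/smp.c. A minimal sketch of the x86 single-CPU path, condensed from this diff (not a complete listing; the irq_call_count accounting is omitted):

/* Send side: generic code calls the arch hook, which just raises an IPI. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
        /* on bare metal this is native_send_call_func_single_ipi() */
        smp_ops.send_call_func_single_ipi(cpu);
}

void native_send_call_func_single_ipi(int cpu)
{
        send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}

/* Receive side: the arch handler acks the vector and hands off to the
 * generic helper, which dequeues the pending call and runs func(info). */
void smp_call_function_single_interrupt(void)
{
        ack_APIC_irq();
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
}

The broadcast path has the same shape, going through send_call_func_ipi and generic_smp_call_function_interrupt().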
Showing 20 changed files with 125 additions and 380 deletions
- arch/x86/Kconfig
- arch/x86/kernel/apic_32.c
- arch/x86/kernel/entry_64.S
- arch/x86/kernel/i8259_64.c
- arch/x86/kernel/smp.c
- arch/x86/kernel/smpboot.c
- arch/x86/kernel/smpcommon.c
- arch/x86/mach-voyager/voyager_smp.c
- arch/x86/xen/enlighten.c
- arch/x86/xen/mmu.c
- arch/x86/xen/smp.c
- arch/x86/xen/xen-ops.h
- include/asm-x86/hw_irq_32.h
- include/asm-x86/hw_irq_64.h
- include/asm-x86/mach-default/entry_arch.h
- include/asm-x86/mach-default/irq_vectors.h
- include/asm-x86/mach-voyager/entry_arch.h
- include/asm-x86/mach-voyager/irq_vectors.h
- include/asm-x86/smp.h
- include/asm-x86/xen/events.h
arch/x86/Kconfig
arch/x86/kernel/apic_32.c
@@ -1358,6 +1358,10 @@
 
         /* IPI for generic function call */
         set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+        /* IPI for single call function */
+        set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+                      call_function_single_interrupt);
 }
 #endif
 
arch/x86/kernel/entry_64.S
@@ -711,6 +711,9 @@
 ENTRY(call_function_interrupt)
         apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
 END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+        apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
 ENTRY(irq_move_cleanup_interrupt)
         apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
 END(irq_move_cleanup_interrupt)
arch/x86/kernel/i8259_64.c
@@ -494,6 +494,10 @@
         /* IPI for generic function call */
         set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 
+        /* IPI for generic single function call */
+        set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+                      call_function_single_interrupt);
+
         /* Low priority IPI to cleanup after moving an irq */
         set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
arch/x86/kernel/smp.c
@@ -121,132 +121,23 @@
         send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-        void (*func) (void *info);
-        void *info;
-        atomic_t started;
-        atomic_t finished;
-        int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-        spin_lock_irq(&call_lock);
+        send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-void unlock_ipi_call_lock(void)
+void native_send_call_func_ipi(cpumask_t mask)
 {
-        spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-                                int nonatomic, int wait)
-{
-        struct call_data_struct data;
-        int cpus = num_online_cpus() - 1;
-
-        if (!cpus)
-                return;
-
-        data.func = func;
-        data.info = info;
-        atomic_set(&data.started, 0);
-        data.wait = wait;
-        if (wait)
-                atomic_set(&data.finished, 0);
-
-        call_data = &data;
-        mb();
-
-        /* Send a message to all other CPUs and wait for them to respond */
-        send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-        /* Wait for response */
-        while (atomic_read(&data.started) != cpus)
-                cpu_relax();
-
-        if (wait)
-                while (atomic_read(&data.finished) != cpus)
-                        cpu_relax();
-}
-
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-                              void (*func)(void *), void *info,
-                              int wait)
-{
-        struct call_data_struct data;
         cpumask_t allbutself;
-        int cpus;
 
-        /* Can deadlock when called with interrupts disabled */
-        WARN_ON(irqs_disabled());
-
-        /* Holding any lock stops cpus from going down. */
-        spin_lock(&call_lock);
-
         allbutself = cpu_online_map;
         cpu_clear(smp_processor_id(), allbutself);
 
-        cpus_and(mask, mask, allbutself);
-        cpus = cpus_weight(mask);
-
-        if (!cpus) {
-                spin_unlock(&call_lock);
-                return 0;
-        }
-
-        data.func = func;
-        data.info = info;
-        atomic_set(&data.started, 0);
-        data.wait = wait;
-        if (wait)
-                atomic_set(&data.finished, 0);
-
-        call_data = &data;
-        wmb();
-
-        /* Send a message to other CPUs */
         if (cpus_equal(mask, allbutself) &&
             cpus_equal(cpu_online_map, cpu_callout_map))
                 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
         else
                 send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-        /* Wait for response */
-        while (atomic_read(&data.started) != cpus)
-                cpu_relax();
-
-        if (wait)
-                while (atomic_read(&data.finished) != cpus)
-                        cpu_relax();
-        spin_unlock(&call_lock);
-
-        return 0;
 }
 
 static void stop_this_cpu(void *dummy)
@@ -268,18 +159,13 @@
 
 static void native_smp_send_stop(void)
 {
-        int nolock;
         unsigned long flags;
 
         if (reboot_force)
                 return;
 
-        /* Don't deadlock on the call lock in panic */
-        nolock = !spin_trylock(&call_lock);
+        smp_call_function(stop_this_cpu, NULL, 0, 0);
         local_irq_save(flags);
-        __smp_call_function(stop_this_cpu, NULL, 0, 0);
-        if (!nolock)
-                spin_unlock(&call_lock);
         disable_local_APIC();
         local_irq_restore(flags);
 }
 
@@ -301,33 +187,28 @@
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-        void (*func) (void *info) = call_data->func;
-        void *info = call_data->info;
-        int wait = call_data->wait;
-
         ack_APIC_irq();
-        /*
-         * Notify initiating CPU that I've grabbed the data and am
-         * about to execute the function
-         */
-        mb();
-        atomic_inc(&call_data->started);
-        /*
-         * At this point the info structure may be out of scope unless wait==1
-         */
         irq_enter();
-        (*func)(info);
+        generic_smp_call_function_interrupt();
 #ifdef CONFIG_X86_32
         __get_cpu_var(irq_stat).irq_call_count++;
 #else
         add_pda(irq_call_count, 1);
 #endif
         irq_exit();
+}
 
-        if (wait) {
-                mb();
-                atomic_inc(&call_data->finished);
-        }
+void smp_call_function_single_interrupt(void)
+{
+        ack_APIC_irq();
+        irq_enter();
+        generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+        __get_cpu_var(irq_stat).irq_call_count++;
+#else
+        add_pda(irq_call_count, 1);
+#endif
+        irq_exit();
 }
 
 struct smp_ops smp_ops = {
@@ -338,7 +219,9 @@
 
         .smp_send_stop = native_smp_send_stop,
         .smp_send_reschedule = native_smp_send_reschedule,
-        .smp_call_function_mask = native_smp_call_function_mask,
+
+        .send_call_func_ipi = native_send_call_func_ipi,
+        .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
arch/x86/kernel/smpboot.c
@@ -345,7 +345,7 @@
          * lock helps us to not include this cpu in a currently in progress
          * smp_call_function().
          */
-        lock_ipi_call_lock();
+        ipi_call_lock_irq();
 #ifdef CONFIG_X86_64
         spin_lock(&vector_lock);
 
@@ -357,7 +357,7 @@
         spin_unlock(&vector_lock);
 #endif
         cpu_set(smp_processor_id(), cpu_online_map);
-        unlock_ipi_call_lock();
+        ipi_call_unlock_irq();
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
         setup_secondary_clock();
arch/x86/kernel/smpcommon.c
@@ -25,60 +25,4 @@
         per_cpu(cpu_number, cpu) = cpu;
 }
 #endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-                      int wait)
-{
-        return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                             int nonatomic, int wait)
-{
-        /* prevent preemption and reschedule on another processor */
-        int ret;
-        int me = get_cpu();
-        if (cpu == me) {
-                local_irq_disable();
-                func(info);
-                local_irq_enable();
-                put_cpu();
-                return 0;
-        }
-
-        ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-        put_cpu();
-        return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
arch/x86/mach-voyager/voyager_smp.c
@@ -955,94 +955,24 @@
         halt();
 }
 
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-        void (*func) (void *info);
-        void *info;
-        volatile unsigned long started;
-        volatile unsigned long finished;
-        int wait;
-};
-
-static struct call_data_struct *call_data;
-
 /* execute a thread on a new CPU.  The function to be called must be
  * previously set up.  This is used to schedule a function for
  * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void smp_call_function_interrupt(void)
 {
-        void (*func) (void *info) = call_data->func;
-        void *info = call_data->info;
-        /* must take copy of wait because call_data may be replaced
-         * unless the function is waiting for us to finish */
-        int wait = call_data->wait;
-        __u8 cpu = smp_processor_id();
-
-        /*
-         * Notify initiating CPU that I've grabbed the data and am
-         * about to execute the function
-         */
-        mb();
-        if (!test_and_clear_bit(cpu, &call_data->started)) {
-                /* If the bit wasn't set, this could be a replay */
-                printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
-                       " with no call pending\n", cpu);
-                return;
-        }
-        /*
-         * At this point the info structure may be out of scope unless wait==1
-         */
         irq_enter();
-        (*func) (info);
+        generic_smp_call_function_interrupt();
         __get_cpu_var(irq_stat).irq_call_count++;
         irq_exit();
-        if (wait) {
-                mb();
-                clear_bit(cpu, &call_data->finished);
-        }
 }
 
-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
-                               void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
 {
-        struct call_data_struct data;
-        u32 mask = cpus_addr(cpumask)[0];
-
-        mask &= ~(1 << smp_processor_id());
-
-        if (!mask)
-                return 0;
-
-        /* Can deadlock when called with interrupts disabled */
-        WARN_ON(irqs_disabled());
-
-        data.func = func;
-        data.info = info;
-        data.started = mask;
-        data.wait = wait;
-        if (wait)
-                data.finished = mask;
-
-        spin_lock(&call_lock);
-        call_data = &data;
-        wmb();
-        /* Send a message to all other CPUs and wait for them to respond */
-        send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
-        /* Wait for response */
-        while (data.started)
-                barrier();
-
-        if (wait)
-                while (data.finished)
-                        barrier();
-
-        spin_unlock(&call_lock);
-
-        return 0;
+        irq_enter();
+        generic_smp_call_function_single_interrupt();
+        __get_cpu_var(irq_stat).irq_call_count++;
+        irq_exit();
 }
 
 /* Sorry about the name.  In an APIC based system, the APICs
@@ -1099,6 +1029,12 @@
         smp_call_function_interrupt();
 }
 
+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+        ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+        smp_call_function_single_interrupt();
+}
+
 void smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1119,6 +1055,8 @@
                 smp_enable_irq_interrupt();
         if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                 smp_call_function_interrupt();
+        if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+                smp_call_function_single_interrupt();
         set_irq_regs(old_regs);
 }
 
@@ -1862,6 +1800,8 @@
 
         .smp_send_stop = voyager_smp_send_stop,
         .smp_send_reschedule = voyager_smp_send_reschedule,
-        .smp_call_function_mask = voyager_smp_call_function_mask,
+
+        .send_call_func_ipi = native_send_call_func_ipi,
+        .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
arch/x86/xen/enlighten.c
@@ -1108,7 +1108,9 @@
 
         .smp_send_stop = xen_smp_send_stop,
         .smp_send_reschedule = xen_smp_send_reschedule,
-        .smp_call_function_mask = xen_smp_call_function_mask,
+
+        .send_call_func_ipi = xen_smp_send_call_function_ipi,
+        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
 };
 #endif  /* CONFIG_SMP */
 
arch/x86/xen/mmu.c
arch/x86/xen/smp.c
@@ -36,28 +36,15 @@
 #include "mmu.h"
 
 static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, callfunc_irq) = -1;
+
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
 static DEFINE_PER_CPU(int, debug_irq) = -1;
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-        void (*func) (void *info);
-        void *info;
-        atomic_t started;
-        atomic_t finished;
-        int wait;
-};
-
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
-static struct call_data_struct *call_data;
-
 /*
  * Reschedule call back. Nothing to do,
  * all the work is done automatically when
@@ -122,6 +109,17 @@
                 goto fail;
         per_cpu(debug_irq, cpu) = rc;
 
+        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+                                    cpu,
+                                    xen_call_function_single_interrupt,
+                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+                                    callfunc_name,
+                                    NULL);
+        if (rc < 0)
+                goto fail;
+        per_cpu(callfuncsingle_irq, cpu) = rc;
+
         return 0;
 
 fail:
@@ -131,6 +129,9 @@
                 unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
         if (per_cpu(debug_irq, cpu) >= 0)
                 unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+        if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+                unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+
         return rc;
 }
 
@@ -338,7 +339,6 @@
         xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-
 static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 {
         unsigned cpu;
@@ -349,84 +349,43 @@
                 xen_send_IPI_one(cpu, vector);
 }
 
-static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
+void xen_smp_send_call_function_ipi(cpumask_t mask)
 {
-        void (*func) (void *info) = call_data->func;
-        void *info = call_data->info;
-        int wait = call_data->wait;
+        int cpu;
 
-        /*
-         * Notify initiating CPU that I've grabbed the data and am
-         * about to execute the function
-         */
-        mb();
-        atomic_inc(&call_data->started);
-        /*
-         * At this point the info structure may be out of scope unless wait==1
-         */
+        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+        /* Make sure other vcpus get a chance to run if they need to. */
+        for_each_cpu_mask(cpu, mask) {
+                if (xen_vcpu_stolen(cpu)) {
+                        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+                        break;
+                }
+        }
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+        xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
+{
         irq_enter();
-        (*func)(info);
+        generic_smp_call_function_interrupt();
         __get_cpu_var(irq_stat).irq_call_count++;
         irq_exit();
 
-        if (wait) {
-                mb();           /* commit everything before setting finished */
-                atomic_inc(&call_data->finished);
-        }
-
         return IRQ_HANDLED;
 }
 
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-                               void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
-        struct call_data_struct data;
-        int cpus, cpu;
-        bool yield;
+        irq_enter();
+        generic_smp_call_function_single_interrupt();
+        __get_cpu_var(irq_stat).irq_call_count++;
+        irq_exit();
 
-        /* Holding any lock stops cpus from going down. */
-        spin_lock(&call_lock);
-
-        cpu_clear(smp_processor_id(), mask);
-
-        cpus = cpus_weight(mask);
-        if (!cpus) {
-                spin_unlock(&call_lock);
-                return 0;
-        }
-
-        /* Can deadlock when called with interrupts disabled */
-        WARN_ON(irqs_disabled());
-
-        data.func = func;
-        data.info = info;
-        atomic_set(&data.started, 0);
-        data.wait = wait;
-        if (wait)
-                atomic_set(&data.finished, 0);
-
-        call_data = &data;
-        mb();                   /* write everything before IPI */
-
-        /* Send a message to other CPUs and wait for them to respond */
-        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-        /* Make sure other vcpus get a chance to run if they need to. */
-        yield = false;
-        for_each_cpu_mask(cpu, mask)
-                if (xen_vcpu_stolen(cpu))
-                        yield = true;
-
-        if (yield)
-                HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-        /* Wait for response */
-        while (atomic_read(&data.started) != cpus ||
-               (wait && atomic_read(&data.finished) != cpus))
-                cpu_relax();
-
-        spin_unlock(&call_lock);
-
-        return 0;
+        return IRQ_HANDLED;
 }
arch/x86/xen/xen-ops.h
@@ -46,13 +46,8 @@
 
 void xen_smp_send_stop(void);
 void xen_smp_send_reschedule(int cpu);
-int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-                           int wait);
-int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                                 int nonatomic, int wait);
-
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-                               void *info, int wait);
+void xen_smp_send_call_function_ipi(cpumask_t mask);
+void xen_smp_send_call_function_single_ipi(int cpu);
 
 
 /* Declare an asm function, along with symbols needed to make it
include/asm-x86/hw_irq_32.h
include/asm-x86/hw_irq_64.h
@@ -68,6 +68,7 @@
 #define ERROR_APIC_VECTOR 0xfe
 #define RESCHEDULE_VECTOR 0xfd
 #define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
 /* fb free - please don't readd KDB here because it's useless
    (hint - think what a NMI bit does to a vector) */
 #define THERMAL_APIC_VECTOR 0xfa
@@ -102,6 +103,7 @@
 void error_interrupt(void);
 void reschedule_interrupt(void);
 void call_function_interrupt(void);
+void call_function_single_interrupt(void);
 void irq_move_cleanup_interrupt(void);
 void invalidate_interrupt0(void);
 void invalidate_interrupt1(void);
include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 #endif
 
 /*
include/asm-x86/mach-default/irq_vectors.h
include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,5 @@
 BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
 BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
 BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
include/asm-x86/mach-voyager/irq_vectors.h
@@ -33,6 +33,7 @@
 #define VIC_RESCHEDULE_CPI 4
 #define VIC_ENABLE_IRQ_CPI 5
 #define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
 
 /* Now the QIC CPIs:  Since we don't need the two initial levels,
  * these are 2 less than the VIC CPIs */
@@ -42,9 +43,10 @@
 #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
 #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
 #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
 
 #define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
 
 /* this is the SYS_INT CPI. */
 #define VIC_SYS_INT 8
include/asm-x86/smp.h
@@ -59,9 +59,9 @@
 
         void (*smp_send_stop)(void);
         void (*smp_send_reschedule)(int cpu);
-        int (*smp_call_function_mask)(cpumask_t mask,
-                                      void (*func)(void *info), void *info,
-                                      int wait);
+
+        void (*send_call_func_ipi)(cpumask_t mask);
+        void (*send_call_func_single_ipi)(int cpu);
 };
 
 /* Globals due to paravirt */
@@ -103,17 +103,22 @@
         smp_ops.smp_send_reschedule(cpu);
 }
 
-static inline int smp_call_function_mask(cpumask_t mask,
-                                         void (*func) (void *info), void *info,
-                                         int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
 {
-        return smp_ops.smp_call_function_mask(mask, func, info, wait);
+        smp_ops.send_call_func_single_ipi(cpu);
 }
 
+static inline void arch_send_call_function_ipi(cpumask_t mask)
+{
+        smp_ops.send_call_func_ipi(mask);
+}
+
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
@@ -202,8 +207,6 @@
 #endif
 
 extern void smp_alloc_memory(void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
 #endif
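For reference, call sites need no changes from this conversion: the smp_call_function() and smp_call_function_single() entry points keep the signatures visible in the removed arch/x86/kernel/smpcommon.c wrappers above (the nonatomic argument is unused). A purely hypothetical caller, for illustration only:

/* Hypothetical example -- these functions are not part of this commit. */
static void drain_local_counters(void *info)
{
        /* runs in interrupt context on the target CPU:
         * must be fast and non-blocking */
}

static void drain_counters(int cpu)
{
        /* run on one specific online CPU and wait for it to finish */
        smp_call_function_single(cpu, drain_local_counters, NULL, 0, 1);

        /* run on every other online CPU, also waiting */
        smp_call_function(drain_local_counters, NULL, 0, 1);
}

Both calls now route through kernel/smp.c and reach the architecture only via the send_call_func_ipi/send_call_func_single_ipi hooks added in this patch.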