Commit 23d72bfd8f9f24aa9efafed3586a99f5669c23d7
Committed by
Benjamin Herrenschmidt
1 parent
17f9c8a73b
Exists in
master
and in
7 other branches
powerpc: Consolidate ipi message mux and demux
Consolidate the mux and demux of ipi messages into smp.c and call a new smp_ops callback to actually trigger the ipi. The powerpc architecture code is optimised for having 4 distinct ipi triggers, which are mapped to 4 distinct messages (ipi many, ipi single, scheduler ipi, and enter debugger). However, several interrupt controllers only provide a single software triggered interrupt that can be delivered to each cpu. To resolve this limitation, each smp_ops implementation created a per-cpu variable that is manipulated with atomic bitops. Since these lines will be contended they are optimally marked as shared_aligned and take a full cache line for each cpu. Distro kernels may have 2 or 3 of these in their config, each taking per-cpu space even though at most one will be in use. This consolidation removes smp_message_recv and replaces the single-call action cases with direct calls from the common message recognition loop. The complicated debugger ipi case with its muxed crash handling code is moved to debug_ipi_action which is now called from the demux code (instead of the multi-message action calling smp_message_recv). I put a call to reschedule_action to increase the likelihood of correctly merging the anticipated scheduler_ipi() hook coming from the scheduler tree; that single required call can be inlined later. The actual message decode is a copy of the old pseries xics code with its memory barriers and cache line spacing, augmented with a per-cpu unsigned long based on the book-e doorbell code. The optional data is set via a callback from the implementation and is passed to the new cause-ipi hook along with the logical cpu number. While currently only the doorbell implementation uses this data it should be almost zero cost to retrieve and pass it -- it adds a single register load for the argument from the same cache line to which we just completed a store and the register is dead on return from the call. 
I extended the data element from unsigned int to unsigned long in case some other code wanted to associate a pointer. The doorbell check_self is replaced by a call to smp_muxed_ipi_resend, conditioned on the CPU_FTR_DBELL feature. The ifdef guard could be relaxed to CONFIG_SMP but I left it with BOOKE for now. Also, the doorbell interrupt vector for book-e was not calling irq_enter and irq_exit, which throws off cpu accounting and causes code to not realize it is running in interrupt context. Add the missing calls. Signed-off-by: Milton Miller <miltonm@bga.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Showing 17 changed files with 126 additions and 176 deletions Side-by-side Diff
- arch/powerpc/include/asm/dbell.h
- arch/powerpc/include/asm/smp.h
- arch/powerpc/include/asm/xics.h
- arch/powerpc/kernel/dbell.c
- arch/powerpc/kernel/irq.c
- arch/powerpc/kernel/smp.c
- arch/powerpc/platforms/85xx/smp.c
- arch/powerpc/platforms/cell/interrupt.c
- arch/powerpc/platforms/iseries/irq.c
- arch/powerpc/platforms/iseries/smp.c
- arch/powerpc/platforms/iseries/smp.h
- arch/powerpc/platforms/powermac/smp.c
- arch/powerpc/platforms/pseries/smp.c
- arch/powerpc/platforms/wsp/smp.c
- arch/powerpc/sysdev/xics/icp-hv.c
- arch/powerpc/sysdev/xics/icp-native.c
- arch/powerpc/sysdev/xics/xics-common.c
arch/powerpc/include/asm/dbell.h
... | ... | @@ -27,9 +27,8 @@ |
27 | 27 | PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ |
28 | 28 | }; |
29 | 29 | |
30 | -extern void doorbell_message_pass(int cpu, int msg); | |
30 | +extern void doorbell_cause_ipi(int cpu, unsigned long data); | |
31 | 31 | extern void doorbell_exception(struct pt_regs *regs); |
32 | -extern void doorbell_check_self(void); | |
33 | 32 | extern void doorbell_setup_this_cpu(void); |
34 | 33 | |
35 | 34 | static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) |
arch/powerpc/include/asm/smp.h
... | ... | @@ -20,6 +20,7 @@ |
20 | 20 | #include <linux/threads.h> |
21 | 21 | #include <linux/cpumask.h> |
22 | 22 | #include <linux/kernel.h> |
23 | +#include <linux/irqreturn.h> | |
23 | 24 | |
24 | 25 | #ifndef __ASSEMBLY__ |
25 | 26 | |
... | ... | @@ -37,6 +38,7 @@ |
37 | 38 | |
38 | 39 | struct smp_ops_t { |
39 | 40 | void (*message_pass)(int cpu, int msg); |
41 | + void (*cause_ipi)(int cpu, unsigned long data); | |
40 | 42 | int (*probe)(void); |
41 | 43 | int (*kick_cpu)(int nr); |
42 | 44 | void (*setup_cpu)(int nr); |
... | ... | @@ -49,7 +51,6 @@ |
49 | 51 | }; |
50 | 52 | |
51 | 53 | extern void smp_send_debugger_break(void); |
52 | -extern void smp_message_recv(int); | |
53 | 54 | extern void start_secondary_resume(void); |
54 | 55 | extern void __devinit smp_generic_give_timebase(void); |
55 | 56 | extern void __devinit smp_generic_take_timebase(void); |
56 | 57 | |
... | ... | @@ -109,13 +110,16 @@ |
109 | 110 | #define PPC_MSG_CALL_FUNC_SINGLE 2 |
110 | 111 | #define PPC_MSG_DEBUGGER_BREAK 3 |
111 | 112 | |
112 | -/* | |
113 | - * irq controllers that have dedicated ipis per message and don't | |
114 | - * need additional code in the action handler may use this | |
115 | - */ | |
113 | +/* for irq controllers that have dedicated ipis per message (4) */ | |
116 | 114 | extern int smp_request_message_ipi(int virq, int message); |
117 | 115 | extern const char *smp_ipi_name[]; |
118 | 116 | |
117 | +/* for irq controllers with only a single ipi */ | |
118 | +extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); | |
119 | +extern void smp_muxed_ipi_message_pass(int cpu, int msg); | |
120 | +extern void smp_muxed_ipi_resend(void); | |
121 | +extern irqreturn_t smp_ipi_demux(void); | |
122 | + | |
119 | 123 | void smp_init_iSeries(void); |
120 | 124 | void smp_init_pSeries(void); |
121 | 125 | void smp_init_cell(void); |
... | ... | @@ -184,6 +188,8 @@ |
184 | 188 | extern unsigned long __secondary_hold_spinloop; |
185 | 189 | extern unsigned long __secondary_hold_acknowledge; |
186 | 190 | extern char __secondary_hold; |
191 | + | |
192 | +extern irqreturn_t debug_ipi_action(int irq, void *data); | |
187 | 193 | |
188 | 194 | #endif /* __ASSEMBLY__ */ |
189 | 195 |
arch/powerpc/include/asm/xics.h
arch/powerpc/kernel/dbell.c
... | ... | @@ -13,65 +13,35 @@ |
13 | 13 | #include <linux/kernel.h> |
14 | 14 | #include <linux/smp.h> |
15 | 15 | #include <linux/threads.h> |
16 | -#include <linux/percpu.h> | |
16 | +#include <linux/hardirq.h> | |
17 | 17 | |
18 | 18 | #include <asm/dbell.h> |
19 | 19 | #include <asm/irq_regs.h> |
20 | 20 | |
21 | 21 | #ifdef CONFIG_SMP |
22 | -struct doorbell_cpu_info { | |
23 | - unsigned long messages; /* current messages bits */ | |
24 | - unsigned int tag; /* tag value */ | |
25 | -}; | |
26 | - | |
27 | -static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info); | |
28 | - | |
29 | 22 | void doorbell_setup_this_cpu(void) |
30 | 23 | { |
31 | - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | |
24 | + unsigned long tag = mfspr(SPRN_PIR) & 0x3fff; | |
32 | 25 | |
33 | - info->messages = 0; | |
34 | - info->tag = mfspr(SPRN_PIR) & 0x3fff; | |
26 | + smp_muxed_ipi_set_data(smp_processor_id(), tag); | |
35 | 27 | } |
36 | 28 | |
37 | -void doorbell_message_pass(int cpu, int msg) | |
29 | +void doorbell_cause_ipi(int cpu, unsigned long data) | |
38 | 30 | { |
39 | - struct doorbell_cpu_info *info; | |
40 | - | |
41 | - info = &per_cpu(doorbell_cpu_info, cpu); | |
42 | - set_bit(msg, &info->messages); | |
43 | - ppc_msgsnd(PPC_DBELL, 0, info->tag); | |
31 | + ppc_msgsnd(PPC_DBELL, 0, data); | |
44 | 32 | } |
45 | 33 | |
46 | 34 | void doorbell_exception(struct pt_regs *regs) |
47 | 35 | { |
48 | 36 | struct pt_regs *old_regs = set_irq_regs(regs); |
49 | - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | |
50 | - int msg; | |
51 | 37 | |
52 | - /* Warning: regs can be NULL when called from irq enable */ | |
38 | + irq_enter(); | |
53 | 39 | |
54 | - if (!info->messages || (num_online_cpus() < 2)) | |
55 | - goto out; | |
40 | + smp_ipi_demux(); | |
56 | 41 | |
57 | - for (msg = 0; msg < 4; msg++) | |
58 | - if (test_and_clear_bit(msg, &info->messages)) | |
59 | - smp_message_recv(msg); | |
60 | - | |
61 | -out: | |
42 | + irq_exit(); | |
62 | 43 | set_irq_regs(old_regs); |
63 | 44 | } |
64 | - | |
65 | -void doorbell_check_self(void) | |
66 | -{ | |
67 | - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | |
68 | - | |
69 | - if (!info->messages) | |
70 | - return; | |
71 | - | |
72 | - ppc_msgsnd(PPC_DBELL, 0, info->tag); | |
73 | -} | |
74 | - | |
75 | 45 | #else /* CONFIG_SMP */ |
76 | 46 | void doorbell_exception(struct pt_regs *regs) |
77 | 47 | { |
arch/powerpc/kernel/irq.c
... | ... | @@ -66,7 +66,6 @@ |
66 | 66 | #include <asm/ptrace.h> |
67 | 67 | #include <asm/machdep.h> |
68 | 68 | #include <asm/udbg.h> |
69 | -#include <asm/dbell.h> | |
70 | 69 | #include <asm/smp.h> |
71 | 70 | |
72 | 71 | #ifdef CONFIG_PPC64 |
... | ... | @@ -160,7 +159,8 @@ |
160 | 159 | |
161 | 160 | #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) |
162 | 161 | /* Check for pending doorbell interrupts and resend to ourself */ |
163 | - doorbell_check_self(); | |
162 | + if (cpu_has_feature(CPU_FTR_DBELL)) | |
163 | + smp_muxed_ipi_resend(); | |
164 | 164 | #endif |
165 | 165 | |
166 | 166 | /* |
arch/powerpc/kernel/smp.c
... | ... | @@ -111,35 +111,6 @@ |
111 | 111 | } |
112 | 112 | #endif |
113 | 113 | |
114 | -void smp_message_recv(int msg) | |
115 | -{ | |
116 | - switch(msg) { | |
117 | - case PPC_MSG_CALL_FUNCTION: | |
118 | - generic_smp_call_function_interrupt(); | |
119 | - break; | |
120 | - case PPC_MSG_RESCHEDULE: | |
121 | - /* we notice need_resched on exit */ | |
122 | - break; | |
123 | - case PPC_MSG_CALL_FUNC_SINGLE: | |
124 | - generic_smp_call_function_single_interrupt(); | |
125 | - break; | |
126 | - case PPC_MSG_DEBUGGER_BREAK: | |
127 | - if (crash_ipi_function_ptr) { | |
128 | - crash_ipi_function_ptr(get_irq_regs()); | |
129 | - break; | |
130 | - } | |
131 | -#ifdef CONFIG_DEBUGGER | |
132 | - debugger_ipi(get_irq_regs()); | |
133 | - break; | |
134 | -#endif /* CONFIG_DEBUGGER */ | |
135 | - /* FALLTHROUGH */ | |
136 | - default: | |
137 | - printk("SMP %d: smp_message_recv(): unknown msg %d\n", | |
138 | - smp_processor_id(), msg); | |
139 | - break; | |
140 | - } | |
141 | -} | |
142 | - | |
143 | 114 | static irqreturn_t call_function_action(int irq, void *data) |
144 | 115 | { |
145 | 116 | generic_smp_call_function_interrupt(); |
146 | 117 | |
... | ... | @@ -158,9 +129,17 @@ |
158 | 129 | return IRQ_HANDLED; |
159 | 130 | } |
160 | 131 | |
161 | -static irqreturn_t debug_ipi_action(int irq, void *data) | |
132 | +irqreturn_t debug_ipi_action(int irq, void *data) | |
162 | 133 | { |
163 | - smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | |
134 | + if (crash_ipi_function_ptr) { | |
135 | + crash_ipi_function_ptr(get_irq_regs()); | |
136 | + return IRQ_HANDLED; | |
137 | + } | |
138 | + | |
139 | +#ifdef CONFIG_DEBUGGER | |
140 | + debugger_ipi(get_irq_regs()); | |
141 | +#endif /* CONFIG_DEBUGGER */ | |
142 | + | |
164 | 143 | return IRQ_HANDLED; |
165 | 144 | } |
166 | 145 | |
... | ... | @@ -197,6 +176,59 @@ |
197 | 176 | virq, smp_ipi_name[msg], err); |
198 | 177 | |
199 | 178 | return err; |
179 | +} | |
180 | + | |
181 | +struct cpu_messages { | |
182 | + unsigned long messages; /* current messages bits */ | |
183 | + unsigned long data; /* data for cause ipi */ | |
184 | +}; | |
185 | +static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); | |
186 | + | |
187 | +void smp_muxed_ipi_set_data(int cpu, unsigned long data) | |
188 | +{ | |
189 | + struct cpu_messages *info = &per_cpu(ipi_message, cpu); | |
190 | + | |
191 | + info->data = data; | |
192 | +} | |
193 | + | |
194 | +void smp_muxed_ipi_message_pass(int cpu, int msg) | |
195 | +{ | |
196 | + struct cpu_messages *info = &per_cpu(ipi_message, cpu); | |
197 | + unsigned long *tgt = &info->messages; | |
198 | + | |
199 | + set_bit(msg, tgt); | |
200 | + mb(); | |
201 | + smp_ops->cause_ipi(cpu, info->data); | |
202 | +} | |
203 | + | |
204 | +void smp_muxed_ipi_resend(void) | |
205 | +{ | |
206 | + struct cpu_messages *info = &__get_cpu_var(ipi_message); | |
207 | + unsigned long *tgt = &info->messages; | |
208 | + | |
209 | + if (*tgt) | |
210 | + smp_ops->cause_ipi(smp_processor_id(), info->data); | |
211 | +} | |
212 | + | |
213 | +irqreturn_t smp_ipi_demux(void) | |
214 | +{ | |
215 | + struct cpu_messages *info = &__get_cpu_var(ipi_message); | |
216 | + unsigned long *tgt = &info->messages; | |
217 | + | |
218 | + mb(); /* order any irq clear */ | |
219 | + while (*tgt) { | |
220 | + if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) | |
221 | + generic_smp_call_function_interrupt(); | |
222 | + if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) | |
223 | + reschedule_action(0, NULL); /* upcoming sched hook */ | |
224 | + if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) | |
225 | + generic_smp_call_function_single_interrupt(); | |
226 | +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | |
227 | + if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) | |
228 | + debug_ipi_action(0, NULL); | |
229 | +#endif | |
230 | + } | |
231 | + return IRQ_HANDLED; | |
200 | 232 | } |
201 | 233 | |
202 | 234 | void smp_send_reschedule(int cpu) |
arch/powerpc/platforms/85xx/smp.c
... | ... | @@ -235,8 +235,10 @@ |
235 | 235 | smp_85xx_ops.message_pass = smp_mpic_message_pass; |
236 | 236 | } |
237 | 237 | |
238 | - if (cpu_has_feature(CPU_FTR_DBELL)) | |
239 | - smp_85xx_ops.message_pass = doorbell_message_pass; | |
238 | + if (cpu_has_feature(CPU_FTR_DBELL)) { | |
239 | + smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass; | |
240 | + smp_85xx_ops.cause_ipi = doorbell_cause_ipi; | |
241 | + } | |
240 | 242 | |
241 | 243 | BUG_ON(!smp_85xx_ops.message_pass); |
242 | 244 |
arch/powerpc/platforms/cell/interrupt.c
... | ... | @@ -196,8 +196,20 @@ |
196 | 196 | { |
197 | 197 | int ipi = (int)(long)dev_id; |
198 | 198 | |
199 | - smp_message_recv(ipi); | |
200 | - | |
199 | + switch(ipi) { | |
200 | + case PPC_MSG_CALL_FUNCTION: | |
201 | + generic_smp_call_function_interrupt(); | |
202 | + break; | |
203 | + case PPC_MSG_RESCHEDULE: | |
204 | + /* Upcoming sched hook */ | |
205 | + break; | |
206 | + case PPC_MSG_CALL_FUNC_SINGLE: | |
207 | + generic_smp_call_function_single_interrupt(); | |
208 | + break; | |
209 | + case PPC_MSG_DEBUGGER_BREAK: | |
210 | + debug_ipi_action(0, NULL); | |
211 | + break; | |
212 | + } | |
201 | 213 | return IRQ_HANDLED; |
202 | 214 | } |
203 | 215 | static void iic_request_ipi(int ipi, const char *name) |
arch/powerpc/platforms/iseries/irq.c
... | ... | @@ -42,7 +42,6 @@ |
42 | 42 | #include "irq.h" |
43 | 43 | #include "pci.h" |
44 | 44 | #include "call_pci.h" |
45 | -#include "smp.h" | |
46 | 45 | |
47 | 46 | #ifdef CONFIG_PCI |
48 | 47 | |
... | ... | @@ -316,7 +315,7 @@ |
316 | 315 | #ifdef CONFIG_SMP |
317 | 316 | if (get_lppaca()->int_dword.fields.ipi_cnt) { |
318 | 317 | get_lppaca()->int_dword.fields.ipi_cnt = 0; |
319 | - iSeries_smp_message_recv(); | |
318 | + smp_ipi_demux(); | |
320 | 319 | } |
321 | 320 | #endif /* CONFIG_SMP */ |
322 | 321 | if (hvlpevent_is_pending()) |
arch/powerpc/platforms/iseries/smp.c
... | ... | @@ -42,26 +42,8 @@ |
42 | 42 | #include <asm/cputable.h> |
43 | 43 | #include <asm/system.h> |
44 | 44 | |
45 | -#include "smp.h" | |
46 | - | |
47 | -static unsigned long iSeries_smp_message[NR_CPUS]; | |
48 | - | |
49 | -void iSeries_smp_message_recv(void) | |
45 | +static void smp_iSeries_cause_ipi(int cpu, unsigned long data) | |
50 | 46 | { |
51 | - int cpu = smp_processor_id(); | |
52 | - int msg; | |
53 | - | |
54 | - if (num_online_cpus() < 2) | |
55 | - return; | |
56 | - | |
57 | - for (msg = 0; msg < 4; msg++) | |
58 | - if (test_and_clear_bit(msg, &iSeries_smp_message[cpu])) | |
59 | - smp_message_recv(msg); | |
60 | -} | |
61 | - | |
62 | -static void smp_iSeries_message_pass(int cpu, int msg) | |
63 | -{ | |
64 | - set_bit(msg, &iSeries_smp_message[cpu]); | |
65 | 47 | HvCall_sendIPI(&(paca[cpu])); |
66 | 48 | } |
67 | 49 | |
... | ... | @@ -93,7 +75,8 @@ |
93 | 75 | } |
94 | 76 | |
95 | 77 | static struct smp_ops_t iSeries_smp_ops = { |
96 | - .message_pass = smp_iSeries_message_pass, | |
78 | + .message_pass = smp_muxed_ipi_message_pass, | |
79 | + .cause_ipi = smp_iSeries_cause_ipi, | |
97 | 80 | .probe = smp_iSeries_probe, |
98 | 81 | .kick_cpu = smp_iSeries_kick_cpu, |
99 | 82 | .setup_cpu = smp_iSeries_setup_cpu, |
arch/powerpc/platforms/iseries/smp.h
arch/powerpc/platforms/powermac/smp.c
... | ... | @@ -156,28 +156,13 @@ |
156 | 156 | /* |
157 | 157 | * On powersurge (old SMP powermac architecture) we don't have |
158 | 158 | * separate IPIs for separate messages like openpic does. Instead |
159 | - * we have a bitmap for each processor, where a 1 bit means that | |
160 | - * the corresponding message is pending for that processor. | |
161 | - * Ideally each cpu's entry would be in a different cache line. | |
159 | + * use the generic demux helpers | |
162 | 160 | * -- paulus. |
163 | 161 | */ |
164 | -static unsigned long psurge_smp_message[NR_CPUS]; | |
165 | - | |
166 | 162 | void psurge_smp_message_recv(void) |
167 | 163 | { |
168 | - int cpu = smp_processor_id(); | |
169 | - int msg; | |
170 | - | |
171 | - /* clear interrupt */ | |
172 | - psurge_clr_ipi(cpu); | |
173 | - | |
174 | - if (num_online_cpus() < 2) | |
175 | - return; | |
176 | - | |
177 | - /* make sure there is a message there */ | |
178 | - for (msg = 0; msg < 4; msg++) | |
179 | - if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) | |
180 | - smp_message_recv(msg); | |
164 | + psurge_clr_ipi(smp_processor_id()); | |
165 | + smp_ipi_demux(); | |
181 | 166 | } |
182 | 167 | |
183 | 168 | irqreturn_t psurge_primary_intr(int irq, void *d) |
184 | 169 | |
... | ... | @@ -186,9 +171,8 @@ |
186 | 171 | return IRQ_HANDLED; |
187 | 172 | } |
188 | 173 | |
189 | -static void smp_psurge_message_pass(int cpu, int msg) | |
174 | +static void smp_psurge_cause_ipi(int cpu, unsigned long data) | |
190 | 175 | { |
191 | - set_bit(msg, &psurge_smp_message[cpu]); | |
192 | 176 | psurge_set_ipi(cpu); |
193 | 177 | } |
194 | 178 | |
... | ... | @@ -428,7 +412,8 @@ |
428 | 412 | |
429 | 413 | /* PowerSurge-style Macs */ |
430 | 414 | struct smp_ops_t psurge_smp_ops = { |
431 | - .message_pass = smp_psurge_message_pass, | |
415 | + .message_pass = smp_muxed_ipi_message_pass, | |
416 | + .cause_ipi = smp_psurge_cause_ipi, | |
432 | 417 | .probe = smp_psurge_probe, |
433 | 418 | .kick_cpu = smp_psurge_kick_cpu, |
434 | 419 | .setup_cpu = smp_psurge_setup_cpu, |
arch/powerpc/platforms/pseries/smp.c
... | ... | @@ -207,7 +207,8 @@ |
207 | 207 | }; |
208 | 208 | |
209 | 209 | static struct smp_ops_t pSeries_xics_smp_ops = { |
210 | - .message_pass = NULL, /* Filled at runtime by xics_smp_probe() */ | |
210 | + .message_pass = smp_muxed_ipi_message_pass, | |
211 | + .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ | |
211 | 212 | .probe = xics_smp_probe, |
212 | 213 | .kick_cpu = smp_pSeries_kick_cpu, |
213 | 214 | .setup_cpu = smp_xics_setup_cpu, |
arch/powerpc/platforms/wsp/smp.c
... | ... | @@ -75,7 +75,8 @@ |
75 | 75 | } |
76 | 76 | |
77 | 77 | static struct smp_ops_t a2_smp_ops = { |
78 | - .message_pass = doorbell_message_pass, | |
78 | + .message_pass = smp_muxed_ipi_message_pass, | |
79 | + .cause_ipi = doorbell_cause_ipi, | |
79 | 80 | .probe = smp_a2_probe, |
80 | 81 | .kick_cpu = smp_a2_kick_cpu, |
81 | 82 | .setup_cpu = smp_a2_setup_cpu, |
arch/powerpc/sysdev/xics/icp-hv.c
... | ... | @@ -118,12 +118,8 @@ |
118 | 118 | |
119 | 119 | #ifdef CONFIG_SMP |
120 | 120 | |
121 | -static void icp_hv_message_pass(int cpu, int msg) | |
121 | +static void icp_hv_cause_ipi(int cpu, unsigned long data) | |
122 | 122 | { |
123 | - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | |
124 | - | |
125 | - set_bit(msg, tgt); | |
126 | - mb(); | |
127 | 123 | icp_hv_set_qirr(cpu, IPI_PRIORITY); |
128 | 124 | } |
129 | 125 | |
... | ... | @@ -133,7 +129,7 @@ |
133 | 129 | |
134 | 130 | icp_hv_set_qirr(cpu, 0xff); |
135 | 131 | |
136 | - return xics_ipi_dispatch(cpu); | |
132 | + return smp_ipi_demux(); | |
137 | 133 | } |
138 | 134 | |
139 | 135 | #endif /* CONFIG_SMP */ |
... | ... | @@ -146,7 +142,7 @@ |
146 | 142 | .flush_ipi = icp_hv_flush_ipi, |
147 | 143 | #ifdef CONFIG_SMP |
148 | 144 | .ipi_action = icp_hv_ipi_action, |
149 | - .message_pass = icp_hv_message_pass, | |
145 | + .cause_ipi = icp_hv_cause_ipi, | |
150 | 146 | #endif |
151 | 147 | }; |
152 | 148 |
arch/powerpc/sysdev/xics/icp-native.c
... | ... | @@ -134,12 +134,8 @@ |
134 | 134 | |
135 | 135 | #ifdef CONFIG_SMP |
136 | 136 | |
137 | -static void icp_native_message_pass(int cpu, int msg) | |
137 | +static void icp_native_cause_ipi(int cpu, unsigned long data) | |
138 | 138 | { |
139 | - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | |
140 | - | |
141 | - set_bit(msg, tgt); | |
142 | - mb(); | |
143 | 139 | icp_native_set_qirr(cpu, IPI_PRIORITY); |
144 | 140 | } |
145 | 141 | |
... | ... | @@ -149,7 +145,7 @@ |
149 | 145 | |
150 | 146 | icp_native_set_qirr(cpu, 0xff); |
151 | 147 | |
152 | - return xics_ipi_dispatch(cpu); | |
148 | + return smp_ipi_demux(); | |
153 | 149 | } |
154 | 150 | |
155 | 151 | #endif /* CONFIG_SMP */ |
... | ... | @@ -267,7 +263,7 @@ |
267 | 263 | .flush_ipi = icp_native_flush_ipi, |
268 | 264 | #ifdef CONFIG_SMP |
269 | 265 | .ipi_action = icp_native_ipi_action, |
270 | - .message_pass = icp_native_message_pass, | |
266 | + .cause_ipi = icp_native_cause_ipi, | |
271 | 267 | #endif |
272 | 268 | }; |
273 | 269 |
arch/powerpc/sysdev/xics/xics-common.c
... | ... | @@ -126,32 +126,6 @@ |
126 | 126 | |
127 | 127 | #ifdef CONFIG_SMP |
128 | 128 | |
129 | -DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | |
130 | - | |
131 | -irqreturn_t xics_ipi_dispatch(int cpu) | |
132 | -{ | |
133 | - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | |
134 | - | |
135 | - mb(); /* order mmio clearing qirr */ | |
136 | - while (*tgt) { | |
137 | - if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { | |
138 | - smp_message_recv(PPC_MSG_CALL_FUNCTION); | |
139 | - } | |
140 | - if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { | |
141 | - smp_message_recv(PPC_MSG_RESCHEDULE); | |
142 | - } | |
143 | - if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { | |
144 | - smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); | |
145 | - } | |
146 | -#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | |
147 | - if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { | |
148 | - smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | |
149 | - } | |
150 | -#endif | |
151 | - } | |
152 | - return IRQ_HANDLED; | |
153 | -} | |
154 | - | |
155 | 129 | static void xics_request_ipi(void) |
156 | 130 | { |
157 | 131 | unsigned int ipi; |
... | ... | @@ -170,8 +144,8 @@ |
170 | 144 | |
171 | 145 | int __init xics_smp_probe(void) |
172 | 146 | { |
173 | - /* Setup message_pass callback based on which ICP is used */ | |
174 | - smp_ops->message_pass = icp_ops->message_pass; | |
147 | + /* Setup cause_ipi callback based on which ICP is used */ | |
148 | + smp_ops->cause_ipi = icp_ops->cause_ipi; | |
175 | 149 | |
176 | 150 | /* Register all the IPIs */ |
177 | 151 | xics_request_ipi(); |