Commit d4be4f37d9d2a5afc8e79a95beafbac4b83f20c5
1 parent: 5a7b3ff467
Exists in master and in 7 other branches
ppc64: remove ppc_irq_dispatch_handler
Use __do_IRQ instead. The only difference is that every controller is now assumed to have an end() routine (only xics_8259 did not).

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Showing 7 changed files with 12 additions and 118 deletions (inline diff)
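The practical effect of the switch is that the generic __do_IRQ flow ends every interrupt by calling desc->handler->end(irq) unconditionally after the action handlers run, so any hw_interrupt_type without an end() hook would dereference a NULL pointer. A minimal sketch of the kind of no-op end() routine the xics_8259 controller therefore needs (the function name is illustrative; the actual xics.c hunk is not shown in this excerpt):

    /* Sketch only: __do_IRQ finishes via desc->handler->end(irq),
     * so even a controller with nothing to do must provide one. */
    static void xics_end_irq_8259(unsigned int irq)
    {
            /* nothing to do: the cascaded 8259 is handled in the ack path */
    }

    static struct hw_interrupt_type xics_8259_pic = {
            .typename       = " XICS/8259",
            /* ... ack/mask/unmask hooks unchanged ... */
            .end            = xics_end_irq_8259,
    };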
arch/powerpc/kernel/misc_64.S
1 | /* | 1 | /* |
2 | * arch/powerpc/kernel/misc64.S | 2 | * arch/powerpc/kernel/misc64.S |
3 | * | 3 | * |
4 | * This file contains miscellaneous low-level functions. | 4 | * This file contains miscellaneous low-level functions. |
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
6 | * | 6 | * |
7 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) | 7 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) |
8 | * and Paul Mackerras. | 8 | * and Paul Mackerras. |
9 | * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) | 9 | * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) |
10 | * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) | 10 | * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or | 12 | * This program is free software; you can redistribute it and/or |
13 | * modify it under the terms of the GNU General Public License | 13 | * modify it under the terms of the GNU General Public License |
14 | * as published by the Free Software Foundation; either version | 14 | * as published by the Free Software Foundation; either version |
15 | * 2 of the License, or (at your option) any later version. | 15 | * 2 of the License, or (at your option) any later version. |
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/config.h> | 19 | #include <linux/config.h> |
20 | #include <linux/sys.h> | 20 | #include <linux/sys.h> |
21 | #include <asm/unistd.h> | 21 | #include <asm/unistd.h> |
22 | #include <asm/errno.h> | 22 | #include <asm/errno.h> |
23 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
25 | #include <asm/cache.h> | 25 | #include <asm/cache.h> |
26 | #include <asm/ppc_asm.h> | 26 | #include <asm/ppc_asm.h> |
27 | #include <asm/asm-offsets.h> | 27 | #include <asm/asm-offsets.h> |
28 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
29 | #include <asm/thread_info.h> | 29 | #include <asm/thread_info.h> |
30 | 30 | ||
31 | .text | 31 | .text |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * Returns (address we are running at) - (address we were linked at) | 34 | * Returns (address we are running at) - (address we were linked at) |
35 | * for use before the text and data are mapped to KERNELBASE. | 35 | * for use before the text and data are mapped to KERNELBASE. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | _GLOBAL(reloc_offset) | 38 | _GLOBAL(reloc_offset) |
39 | mflr r0 | 39 | mflr r0 |
40 | bl 1f | 40 | bl 1f |
41 | 1: mflr r3 | 41 | 1: mflr r3 |
42 | LOADADDR(r4,1b) | 42 | LOADADDR(r4,1b) |
43 | subf r3,r4,r3 | 43 | subf r3,r4,r3 |
44 | mtlr r0 | 44 | mtlr r0 |
45 | blr | 45 | blr |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * add_reloc_offset(x) returns x + reloc_offset(). | 48 | * add_reloc_offset(x) returns x + reloc_offset(). |
49 | */ | 49 | */ |
50 | _GLOBAL(add_reloc_offset) | 50 | _GLOBAL(add_reloc_offset) |
51 | mflr r0 | 51 | mflr r0 |
52 | bl 1f | 52 | bl 1f |
53 | 1: mflr r5 | 53 | 1: mflr r5 |
54 | LOADADDR(r4,1b) | 54 | LOADADDR(r4,1b) |
55 | subf r5,r4,r5 | 55 | subf r5,r4,r5 |
56 | add r3,r3,r5 | 56 | add r3,r3,r5 |
57 | mtlr r0 | 57 | mtlr r0 |
58 | blr | 58 | blr |
59 | 59 | ||
60 | _GLOBAL(get_msr) | 60 | _GLOBAL(get_msr) |
61 | mfmsr r3 | 61 | mfmsr r3 |
62 | blr | 62 | blr |
63 | 63 | ||
64 | _GLOBAL(get_dar) | 64 | _GLOBAL(get_dar) |
65 | mfdar r3 | 65 | mfdar r3 |
66 | blr | 66 | blr |
67 | 67 | ||
68 | _GLOBAL(get_srr0) | 68 | _GLOBAL(get_srr0) |
69 | mfsrr0 r3 | 69 | mfsrr0 r3 |
70 | blr | 70 | blr |
71 | 71 | ||
72 | _GLOBAL(get_srr1) | 72 | _GLOBAL(get_srr1) |
73 | mfsrr1 r3 | 73 | mfsrr1 r3 |
74 | blr | 74 | blr |
75 | 75 | ||
76 | _GLOBAL(get_sp) | 76 | _GLOBAL(get_sp) |
77 | mr r3,r1 | 77 | mr r3,r1 |
78 | blr | 78 | blr |
79 | 79 | ||
80 | #ifdef CONFIG_IRQSTACKS | 80 | #ifdef CONFIG_IRQSTACKS |
81 | _GLOBAL(call_do_softirq) | 81 | _GLOBAL(call_do_softirq) |
82 | mflr r0 | 82 | mflr r0 |
83 | std r0,16(r1) | 83 | std r0,16(r1) |
84 | stdu r1,THREAD_SIZE-112(r3) | 84 | stdu r1,THREAD_SIZE-112(r3) |
85 | mr r1,r3 | 85 | mr r1,r3 |
86 | bl .__do_softirq | 86 | bl .__do_softirq |
87 | ld r1,0(r1) | 87 | ld r1,0(r1) |
88 | ld r0,16(r1) | 88 | ld r0,16(r1) |
89 | mtlr r0 | 89 | mtlr r0 |
90 | blr | 90 | blr |
91 | 91 | ||
92 | _GLOBAL(call_ppc_irq_dispatch_handler) | 92 | _GLOBAL(call___do_IRQ) |
93 | mflr r0 | 93 | mflr r0 |
94 | std r0,16(r1) | 94 | std r0,16(r1) |
95 | stdu r1,THREAD_SIZE-112(r5) | 95 | stdu r1,THREAD_SIZE-112(r5) |
96 | mr r1,r5 | 96 | mr r1,r5 |
97 | bl .ppc_irq_dispatch_handler | 97 | bl .__do_IRQ |
98 | ld r1,0(r1) | 98 | ld r1,0(r1) |
99 | ld r0,16(r1) | 99 | ld r0,16(r1) |
100 | mtlr r0 | 100 | mtlr r0 |
101 | blr | 101 | blr |
102 | #endif /* CONFIG_IRQSTACKS */ | 102 | #endif /* CONFIG_IRQSTACKS */ |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * To be called by C code which needs to do some operations with MMU | 105 | * To be called by C code which needs to do some operations with MMU |
106 | * disabled. Note that interrupts have to be disabled by the caller | 106 | * disabled. Note that interrupts have to be disabled by the caller |
107 | * prior to calling us. The code called _MUST_ be in the RMO of course | 107 | * prior to calling us. The code called _MUST_ be in the RMO of course |
108 | * and part of the linear mapping as we don't attempt to translate the | 108 | * and part of the linear mapping as we don't attempt to translate the |
109 | * stack pointer at all. The function is called with the stack switched | 109 | * stack pointer at all. The function is called with the stack switched |
110 | * to this CPU emergency stack | 110 | * to this CPU emergency stack |
111 | * | 111 | * |
112 | * prototype is void *call_with_mmu_off(void *func, void *data); | 112 | * prototype is void *call_with_mmu_off(void *func, void *data); |
113 | * | 113 | * |
114 | * the called function is expected to be of the form | 114 | * the called function is expected to be of the form |
115 | * | 115 | * |
116 | * void *called(void *data); | 116 | * void *called(void *data); |
117 | */ | 117 | */ |
118 | _GLOBAL(call_with_mmu_off) | 118 | _GLOBAL(call_with_mmu_off) |
119 | mflr r0 /* get link, save it on stackframe */ | 119 | mflr r0 /* get link, save it on stackframe */ |
120 | std r0,16(r1) | 120 | std r0,16(r1) |
121 | mr r1,r5 /* save old stack ptr */ | 121 | mr r1,r5 /* save old stack ptr */ |
122 | ld r1,PACAEMERGSP(r13) /* get emerg. stack */ | 122 | ld r1,PACAEMERGSP(r13) /* get emerg. stack */ |
123 | subi r1,r1,STACK_FRAME_OVERHEAD | 123 | subi r1,r1,STACK_FRAME_OVERHEAD |
124 | std r0,16(r1) /* save link on emerg. stack */ | 124 | std r0,16(r1) /* save link on emerg. stack */ |
125 | std r5,0(r1) /* save old stack ptr in backchain */ | 125 | std r5,0(r1) /* save old stack ptr in backchain */ |
126 | ld r3,0(r3) /* get to real function ptr (assume same TOC) */ | 126 | ld r3,0(r3) /* get to real function ptr (assume same TOC) */ |
127 | bl 2f /* we need LR to return, continue at label 2 */ | 127 | bl 2f /* we need LR to return, continue at label 2 */ |
128 | 128 | ||
129 | ld r0,16(r1) /* we return here from the call, get LR and */ | 129 | ld r0,16(r1) /* we return here from the call, get LR and */ |
130 | ld r1,0(r1) /* .. old stack ptr */ | 130 | ld r1,0(r1) /* .. old stack ptr */ |
131 | mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */ | 131 | mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */ |
132 | mfmsr r4 | 132 | mfmsr r4 |
133 | ori r4,r4,MSR_IR|MSR_DR | 133 | ori r4,r4,MSR_IR|MSR_DR |
134 | mtspr SPRN_SRR1,r4 | 134 | mtspr SPRN_SRR1,r4 |
135 | rfid | 135 | rfid |
136 | 136 | ||
137 | 2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */ | 137 | 2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */ |
138 | mr r3,r4 /* get parameter */ | 138 | mr r3,r4 /* get parameter */ |
139 | mfmsr r0 | 139 | mfmsr r0 |
140 | ori r0,r0,MSR_IR|MSR_DR | 140 | ori r0,r0,MSR_IR|MSR_DR |
141 | xori r0,r0,MSR_IR|MSR_DR | 141 | xori r0,r0,MSR_IR|MSR_DR |
142 | mtspr SPRN_SRR1,r0 | 142 | mtspr SPRN_SRR1,r0 |
143 | rfid | 143 | rfid |
144 | 144 | ||
145 | 145 | ||
146 | .section ".toc","aw" | 146 | .section ".toc","aw" |
147 | PPC64_CACHES: | 147 | PPC64_CACHES: |
148 | .tc ppc64_caches[TC],ppc64_caches | 148 | .tc ppc64_caches[TC],ppc64_caches |
149 | .section ".text" | 149 | .section ".text" |
150 | 150 | ||
151 | /* | 151 | /* |
152 | * Write any modified data cache blocks out to memory | 152 | * Write any modified data cache blocks out to memory |
153 | * and invalidate the corresponding instruction cache blocks. | 153 | * and invalidate the corresponding instruction cache blocks. |
154 | * | 154 | * |
155 | * flush_icache_range(unsigned long start, unsigned long stop) | 155 | * flush_icache_range(unsigned long start, unsigned long stop) |
156 | * | 156 | * |
157 | * flush all bytes from start through stop-1 inclusive | 157 | * flush all bytes from start through stop-1 inclusive |
158 | */ | 158 | */ |
159 | 159 | ||
160 | _KPROBE(__flush_icache_range) | 160 | _KPROBE(__flush_icache_range) |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * Flush the data cache to memory | 163 | * Flush the data cache to memory |
164 | * | 164 | * |
165 | * Different systems have different cache line sizes | 165 | * Different systems have different cache line sizes |
166 | * and in some cases i-cache and d-cache line sizes differ from | 166 | * and in some cases i-cache and d-cache line sizes differ from |
167 | * each other. | 167 | * each other. |
168 | */ | 168 | */ |
169 | ld r10,PPC64_CACHES@toc(r2) | 169 | ld r10,PPC64_CACHES@toc(r2) |
170 | lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */ | 170 | lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */ |
171 | addi r5,r7,-1 | 171 | addi r5,r7,-1 |
172 | andc r6,r3,r5 /* round low to line bdy */ | 172 | andc r6,r3,r5 /* round low to line bdy */ |
173 | subf r8,r6,r4 /* compute length */ | 173 | subf r8,r6,r4 /* compute length */ |
174 | add r8,r8,r5 /* ensure we get enough */ | 174 | add r8,r8,r5 /* ensure we get enough */ |
175 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */ | 175 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */ |
176 | srw. r8,r8,r9 /* compute line count */ | 176 | srw. r8,r8,r9 /* compute line count */ |
177 | beqlr /* nothing to do? */ | 177 | beqlr /* nothing to do? */ |
178 | mtctr r8 | 178 | mtctr r8 |
179 | 1: dcbst 0,r6 | 179 | 1: dcbst 0,r6 |
180 | add r6,r6,r7 | 180 | add r6,r6,r7 |
181 | bdnz 1b | 181 | bdnz 1b |
182 | sync | 182 | sync |
183 | 183 | ||
184 | /* Now invalidate the instruction cache */ | 184 | /* Now invalidate the instruction cache */ |
185 | 185 | ||
186 | lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */ | 186 | lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */ |
187 | addi r5,r7,-1 | 187 | addi r5,r7,-1 |
188 | andc r6,r3,r5 /* round low to line bdy */ | 188 | andc r6,r3,r5 /* round low to line bdy */ |
189 | subf r8,r6,r4 /* compute length */ | 189 | subf r8,r6,r4 /* compute length */ |
190 | add r8,r8,r5 | 190 | add r8,r8,r5 |
191 | lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */ | 191 | lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */ |
192 | srw. r8,r8,r9 /* compute line count */ | 192 | srw. r8,r8,r9 /* compute line count */ |
193 | beqlr /* nothing to do? */ | 193 | beqlr /* nothing to do? */ |
194 | mtctr r8 | 194 | mtctr r8 |
195 | 2: icbi 0,r6 | 195 | 2: icbi 0,r6 |
196 | add r6,r6,r7 | 196 | add r6,r6,r7 |
197 | bdnz 2b | 197 | bdnz 2b |
198 | isync | 198 | isync |
199 | blr | 199 | blr |
200 | .previous .text | 200 | .previous .text |
201 | /* | 201 | /* |
202 | * Like above, but only do the D-cache. | 202 | * Like above, but only do the D-cache. |
203 | * | 203 | * |
204 | * flush_dcache_range(unsigned long start, unsigned long stop) | 204 | * flush_dcache_range(unsigned long start, unsigned long stop) |
205 | * | 205 | * |
206 | * flush all bytes from start to stop-1 inclusive | 206 | * flush all bytes from start to stop-1 inclusive |
207 | */ | 207 | */ |
208 | _GLOBAL(flush_dcache_range) | 208 | _GLOBAL(flush_dcache_range) |
209 | 209 | ||
210 | /* | 210 | /* |
211 | * Flush the data cache to memory | 211 | * Flush the data cache to memory |
212 | * | 212 | * |
213 | * Different systems have different cache line sizes | 213 | * Different systems have different cache line sizes |
214 | */ | 214 | */ |
215 | ld r10,PPC64_CACHES@toc(r2) | 215 | ld r10,PPC64_CACHES@toc(r2) |
216 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ | 216 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ |
217 | addi r5,r7,-1 | 217 | addi r5,r7,-1 |
218 | andc r6,r3,r5 /* round low to line bdy */ | 218 | andc r6,r3,r5 /* round low to line bdy */ |
219 | subf r8,r6,r4 /* compute length */ | 219 | subf r8,r6,r4 /* compute length */ |
220 | add r8,r8,r5 /* ensure we get enough */ | 220 | add r8,r8,r5 /* ensure we get enough */ |
221 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ | 221 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ |
222 | srw. r8,r8,r9 /* compute line count */ | 222 | srw. r8,r8,r9 /* compute line count */ |
223 | beqlr /* nothing to do? */ | 223 | beqlr /* nothing to do? */ |
224 | mtctr r8 | 224 | mtctr r8 |
225 | 0: dcbst 0,r6 | 225 | 0: dcbst 0,r6 |
226 | add r6,r6,r7 | 226 | add r6,r6,r7 |
227 | bdnz 0b | 227 | bdnz 0b |
228 | sync | 228 | sync |
229 | blr | 229 | blr |
230 | 230 | ||
231 | /* | 231 | /* |
232 | * Like above, but works on non-mapped physical addresses. | 232 | * Like above, but works on non-mapped physical addresses. |
233 | * Use only for non-LPAR setups ! It also assumes real mode | 233 | * Use only for non-LPAR setups ! It also assumes real mode |
234 | * is cacheable. Used for flushing out the DART before using | 234 | * is cacheable. Used for flushing out the DART before using |
235 | * it as uncacheable memory | 235 | * it as uncacheable memory |
236 | * | 236 | * |
237 | * flush_dcache_phys_range(unsigned long start, unsigned long stop) | 237 | * flush_dcache_phys_range(unsigned long start, unsigned long stop) |
238 | * | 238 | * |
239 | * flush all bytes from start to stop-1 inclusive | 239 | * flush all bytes from start to stop-1 inclusive |
240 | */ | 240 | */ |
241 | _GLOBAL(flush_dcache_phys_range) | 241 | _GLOBAL(flush_dcache_phys_range) |
242 | ld r10,PPC64_CACHES@toc(r2) | 242 | ld r10,PPC64_CACHES@toc(r2) |
243 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ | 243 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ |
244 | addi r5,r7,-1 | 244 | addi r5,r7,-1 |
245 | andc r6,r3,r5 /* round low to line bdy */ | 245 | andc r6,r3,r5 /* round low to line bdy */ |
246 | subf r8,r6,r4 /* compute length */ | 246 | subf r8,r6,r4 /* compute length */ |
247 | add r8,r8,r5 /* ensure we get enough */ | 247 | add r8,r8,r5 /* ensure we get enough */ |
248 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ | 248 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ |
249 | srw. r8,r8,r9 /* compute line count */ | 249 | srw. r8,r8,r9 /* compute line count */ |
250 | beqlr /* nothing to do? */ | 250 | beqlr /* nothing to do? */ |
251 | mfmsr r5 /* Disable MMU Data Relocation */ | 251 | mfmsr r5 /* Disable MMU Data Relocation */ |
252 | ori r0,r5,MSR_DR | 252 | ori r0,r5,MSR_DR |
253 | xori r0,r0,MSR_DR | 253 | xori r0,r0,MSR_DR |
254 | sync | 254 | sync |
255 | mtmsr r0 | 255 | mtmsr r0 |
256 | sync | 256 | sync |
257 | isync | 257 | isync |
258 | mtctr r8 | 258 | mtctr r8 |
259 | 0: dcbst 0,r6 | 259 | 0: dcbst 0,r6 |
260 | add r6,r6,r7 | 260 | add r6,r6,r7 |
261 | bdnz 0b | 261 | bdnz 0b |
262 | sync | 262 | sync |
263 | isync | 263 | isync |
264 | mtmsr r5 /* Re-enable MMU Data Relocation */ | 264 | mtmsr r5 /* Re-enable MMU Data Relocation */ |
265 | sync | 265 | sync |
266 | isync | 266 | isync |
267 | blr | 267 | blr |
268 | 268 | ||
269 | _GLOBAL(flush_inval_dcache_range) | 269 | _GLOBAL(flush_inval_dcache_range) |
270 | ld r10,PPC64_CACHES@toc(r2) | 270 | ld r10,PPC64_CACHES@toc(r2) |
271 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ | 271 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ |
272 | addi r5,r7,-1 | 272 | addi r5,r7,-1 |
273 | andc r6,r3,r5 /* round low to line bdy */ | 273 | andc r6,r3,r5 /* round low to line bdy */ |
274 | subf r8,r6,r4 /* compute length */ | 274 | subf r8,r6,r4 /* compute length */ |
275 | add r8,r8,r5 /* ensure we get enough */ | 275 | add r8,r8,r5 /* ensure we get enough */ |
276 | lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */ | 276 | lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */ |
277 | srw. r8,r8,r9 /* compute line count */ | 277 | srw. r8,r8,r9 /* compute line count */ |
278 | beqlr /* nothing to do? */ | 278 | beqlr /* nothing to do? */ |
279 | sync | 279 | sync |
280 | isync | 280 | isync |
281 | mtctr r8 | 281 | mtctr r8 |
282 | 0: dcbf 0,r6 | 282 | 0: dcbf 0,r6 |
283 | add r6,r6,r7 | 283 | add r6,r6,r7 |
284 | bdnz 0b | 284 | bdnz 0b |
285 | sync | 285 | sync |
286 | isync | 286 | isync |
287 | blr | 287 | blr |
288 | 288 | ||
289 | 289 | ||
290 | /* | 290 | /* |
291 | * Flush a particular page from the data cache to RAM. | 291 | * Flush a particular page from the data cache to RAM. |
292 | * Note: this is necessary because the instruction cache does *not* | 292 | * Note: this is necessary because the instruction cache does *not* |
293 | * snoop from the data cache. | 293 | * snoop from the data cache. |
294 | * | 294 | * |
295 | * void __flush_dcache_icache(void *page) | 295 | * void __flush_dcache_icache(void *page) |
296 | */ | 296 | */ |
297 | _GLOBAL(__flush_dcache_icache) | 297 | _GLOBAL(__flush_dcache_icache) |
298 | /* | 298 | /* |
299 | * Flush the data cache to memory | 299 | * Flush the data cache to memory |
300 | * | 300 | * |
301 | * Different systems have different cache line sizes | 301 | * Different systems have different cache line sizes |
302 | */ | 302 | */ |
303 | 303 | ||
304 | /* Flush the dcache */ | 304 | /* Flush the dcache */ |
305 | ld r7,PPC64_CACHES@toc(r2) | 305 | ld r7,PPC64_CACHES@toc(r2) |
306 | clrrdi r3,r3,PAGE_SHIFT /* Page align */ | 306 | clrrdi r3,r3,PAGE_SHIFT /* Page align */ |
307 | lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ | 307 | lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ |
308 | lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ | 308 | lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ |
309 | mr r6,r3 | 309 | mr r6,r3 |
310 | mtctr r4 | 310 | mtctr r4 |
311 | 0: dcbst 0,r6 | 311 | 0: dcbst 0,r6 |
312 | add r6,r6,r5 | 312 | add r6,r6,r5 |
313 | bdnz 0b | 313 | bdnz 0b |
314 | sync | 314 | sync |
315 | 315 | ||
316 | /* Now invalidate the icache */ | 316 | /* Now invalidate the icache */ |
317 | 317 | ||
318 | lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */ | 318 | lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */ |
319 | lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */ | 319 | lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */ |
320 | mtctr r4 | 320 | mtctr r4 |
321 | 1: icbi 0,r3 | 321 | 1: icbi 0,r3 |
322 | add r3,r3,r5 | 322 | add r3,r3,r5 |
323 | bdnz 1b | 323 | bdnz 1b |
324 | isync | 324 | isync |
325 | blr | 325 | blr |
326 | 326 | ||
327 | /* | 327 | /* |
328 | * I/O string operations | 328 | * I/O string operations |
329 | * | 329 | * |
330 | * insb(port, buf, len) | 330 | * insb(port, buf, len) |
331 | * outsb(port, buf, len) | 331 | * outsb(port, buf, len) |
332 | * insw(port, buf, len) | 332 | * insw(port, buf, len) |
333 | * outsw(port, buf, len) | 333 | * outsw(port, buf, len) |
334 | * insl(port, buf, len) | 334 | * insl(port, buf, len) |
335 | * outsl(port, buf, len) | 335 | * outsl(port, buf, len) |
336 | * insw_ns(port, buf, len) | 336 | * insw_ns(port, buf, len) |
337 | * outsw_ns(port, buf, len) | 337 | * outsw_ns(port, buf, len) |
338 | * insl_ns(port, buf, len) | 338 | * insl_ns(port, buf, len) |
339 | * outsl_ns(port, buf, len) | 339 | * outsl_ns(port, buf, len) |
340 | * | 340 | * |
341 | * The *_ns versions don't do byte-swapping. | 341 | * The *_ns versions don't do byte-swapping. |
342 | */ | 342 | */ |
343 | _GLOBAL(_insb) | 343 | _GLOBAL(_insb) |
344 | cmpwi 0,r5,0 | 344 | cmpwi 0,r5,0 |
345 | mtctr r5 | 345 | mtctr r5 |
346 | subi r4,r4,1 | 346 | subi r4,r4,1 |
347 | blelr- | 347 | blelr- |
348 | 00: lbz r5,0(r3) | 348 | 00: lbz r5,0(r3) |
349 | eieio | 349 | eieio |
350 | stbu r5,1(r4) | 350 | stbu r5,1(r4) |
351 | bdnz 00b | 351 | bdnz 00b |
352 | twi 0,r5,0 | 352 | twi 0,r5,0 |
353 | isync | 353 | isync |
354 | blr | 354 | blr |
355 | 355 | ||
356 | _GLOBAL(_outsb) | 356 | _GLOBAL(_outsb) |
357 | cmpwi 0,r5,0 | 357 | cmpwi 0,r5,0 |
358 | mtctr r5 | 358 | mtctr r5 |
359 | subi r4,r4,1 | 359 | subi r4,r4,1 |
360 | blelr- | 360 | blelr- |
361 | 00: lbzu r5,1(r4) | 361 | 00: lbzu r5,1(r4) |
362 | stb r5,0(r3) | 362 | stb r5,0(r3) |
363 | bdnz 00b | 363 | bdnz 00b |
364 | sync | 364 | sync |
365 | blr | 365 | blr |
366 | 366 | ||
367 | _GLOBAL(_insw) | 367 | _GLOBAL(_insw) |
368 | cmpwi 0,r5,0 | 368 | cmpwi 0,r5,0 |
369 | mtctr r5 | 369 | mtctr r5 |
370 | subi r4,r4,2 | 370 | subi r4,r4,2 |
371 | blelr- | 371 | blelr- |
372 | 00: lhbrx r5,0,r3 | 372 | 00: lhbrx r5,0,r3 |
373 | eieio | 373 | eieio |
374 | sthu r5,2(r4) | 374 | sthu r5,2(r4) |
375 | bdnz 00b | 375 | bdnz 00b |
376 | twi 0,r5,0 | 376 | twi 0,r5,0 |
377 | isync | 377 | isync |
378 | blr | 378 | blr |
379 | 379 | ||
380 | _GLOBAL(_outsw) | 380 | _GLOBAL(_outsw) |
381 | cmpwi 0,r5,0 | 381 | cmpwi 0,r5,0 |
382 | mtctr r5 | 382 | mtctr r5 |
383 | subi r4,r4,2 | 383 | subi r4,r4,2 |
384 | blelr- | 384 | blelr- |
385 | 00: lhzu r5,2(r4) | 385 | 00: lhzu r5,2(r4) |
386 | sthbrx r5,0,r3 | 386 | sthbrx r5,0,r3 |
387 | bdnz 00b | 387 | bdnz 00b |
388 | sync | 388 | sync |
389 | blr | 389 | blr |
390 | 390 | ||
391 | _GLOBAL(_insl) | 391 | _GLOBAL(_insl) |
392 | cmpwi 0,r5,0 | 392 | cmpwi 0,r5,0 |
393 | mtctr r5 | 393 | mtctr r5 |
394 | subi r4,r4,4 | 394 | subi r4,r4,4 |
395 | blelr- | 395 | blelr- |
396 | 00: lwbrx r5,0,r3 | 396 | 00: lwbrx r5,0,r3 |
397 | eieio | 397 | eieio |
398 | stwu r5,4(r4) | 398 | stwu r5,4(r4) |
399 | bdnz 00b | 399 | bdnz 00b |
400 | twi 0,r5,0 | 400 | twi 0,r5,0 |
401 | isync | 401 | isync |
402 | blr | 402 | blr |
403 | 403 | ||
404 | _GLOBAL(_outsl) | 404 | _GLOBAL(_outsl) |
405 | cmpwi 0,r5,0 | 405 | cmpwi 0,r5,0 |
406 | mtctr r5 | 406 | mtctr r5 |
407 | subi r4,r4,4 | 407 | subi r4,r4,4 |
408 | blelr- | 408 | blelr- |
409 | 00: lwzu r5,4(r4) | 409 | 00: lwzu r5,4(r4) |
410 | stwbrx r5,0,r3 | 410 | stwbrx r5,0,r3 |
411 | bdnz 00b | 411 | bdnz 00b |
412 | sync | 412 | sync |
413 | blr | 413 | blr |
414 | 414 | ||
415 | /* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */ | 415 | /* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */ |
416 | _GLOBAL(_insw_ns) | 416 | _GLOBAL(_insw_ns) |
417 | cmpwi 0,r5,0 | 417 | cmpwi 0,r5,0 |
418 | mtctr r5 | 418 | mtctr r5 |
419 | subi r4,r4,2 | 419 | subi r4,r4,2 |
420 | blelr- | 420 | blelr- |
421 | 00: lhz r5,0(r3) | 421 | 00: lhz r5,0(r3) |
422 | eieio | 422 | eieio |
423 | sthu r5,2(r4) | 423 | sthu r5,2(r4) |
424 | bdnz 00b | 424 | bdnz 00b |
425 | twi 0,r5,0 | 425 | twi 0,r5,0 |
426 | isync | 426 | isync |
427 | blr | 427 | blr |
428 | 428 | ||
429 | /* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */ | 429 | /* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */ |
430 | _GLOBAL(_outsw_ns) | 430 | _GLOBAL(_outsw_ns) |
431 | cmpwi 0,r5,0 | 431 | cmpwi 0,r5,0 |
432 | mtctr r5 | 432 | mtctr r5 |
433 | subi r4,r4,2 | 433 | subi r4,r4,2 |
434 | blelr- | 434 | blelr- |
435 | 00: lhzu r5,2(r4) | 435 | 00: lhzu r5,2(r4) |
436 | sth r5,0(r3) | 436 | sth r5,0(r3) |
437 | bdnz 00b | 437 | bdnz 00b |
438 | sync | 438 | sync |
439 | blr | 439 | blr |
440 | 440 | ||
441 | _GLOBAL(_insl_ns) | 441 | _GLOBAL(_insl_ns) |
442 | cmpwi 0,r5,0 | 442 | cmpwi 0,r5,0 |
443 | mtctr r5 | 443 | mtctr r5 |
444 | subi r4,r4,4 | 444 | subi r4,r4,4 |
445 | blelr- | 445 | blelr- |
446 | 00: lwz r5,0(r3) | 446 | 00: lwz r5,0(r3) |
447 | eieio | 447 | eieio |
448 | stwu r5,4(r4) | 448 | stwu r5,4(r4) |
449 | bdnz 00b | 449 | bdnz 00b |
450 | twi 0,r5,0 | 450 | twi 0,r5,0 |
451 | isync | 451 | isync |
452 | blr | 452 | blr |
453 | 453 | ||
454 | _GLOBAL(_outsl_ns) | 454 | _GLOBAL(_outsl_ns) |
455 | cmpwi 0,r5,0 | 455 | cmpwi 0,r5,0 |
456 | mtctr r5 | 456 | mtctr r5 |
457 | subi r4,r4,4 | 457 | subi r4,r4,4 |
458 | blelr- | 458 | blelr- |
459 | 00: lwzu r5,4(r4) | 459 | 00: lwzu r5,4(r4) |
460 | stw r5,0(r3) | 460 | stw r5,0(r3) |
461 | bdnz 00b | 461 | bdnz 00b |
462 | sync | 462 | sync |
463 | blr | 463 | blr |
464 | 464 | ||
465 | /* | 465 | /* |
466 | * identify_cpu and calls setup_cpu | 466 | * identify_cpu and calls setup_cpu |
467 | * In: r3 = base of the cpu_specs array | 467 | * In: r3 = base of the cpu_specs array |
468 | * r4 = address of cur_cpu_spec | 468 | * r4 = address of cur_cpu_spec |
469 | * r5 = relocation offset | 469 | * r5 = relocation offset |
470 | */ | 470 | */ |
471 | _GLOBAL(identify_cpu) | 471 | _GLOBAL(identify_cpu) |
472 | mfpvr r7 | 472 | mfpvr r7 |
473 | 1: | 473 | 1: |
474 | lwz r8,CPU_SPEC_PVR_MASK(r3) | 474 | lwz r8,CPU_SPEC_PVR_MASK(r3) |
475 | and r8,r8,r7 | 475 | and r8,r8,r7 |
476 | lwz r9,CPU_SPEC_PVR_VALUE(r3) | 476 | lwz r9,CPU_SPEC_PVR_VALUE(r3) |
477 | cmplw 0,r9,r8 | 477 | cmplw 0,r9,r8 |
478 | beq 1f | 478 | beq 1f |
479 | addi r3,r3,CPU_SPEC_ENTRY_SIZE | 479 | addi r3,r3,CPU_SPEC_ENTRY_SIZE |
480 | b 1b | 480 | b 1b |
481 | 1: | 481 | 1: |
482 | sub r0,r3,r5 | 482 | sub r0,r3,r5 |
483 | std r0,0(r4) | 483 | std r0,0(r4) |
484 | ld r4,CPU_SPEC_SETUP(r3) | 484 | ld r4,CPU_SPEC_SETUP(r3) |
485 | add r4,r4,r5 | 485 | add r4,r4,r5 |
486 | ld r4,0(r4) | 486 | ld r4,0(r4) |
487 | add r4,r4,r5 | 487 | add r4,r4,r5 |
488 | mtctr r4 | 488 | mtctr r4 |
489 | /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */ | 489 | /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */ |
490 | mr r4,r3 | 490 | mr r4,r3 |
491 | mr r3,r5 | 491 | mr r3,r5 |
492 | bctr | 492 | bctr |
493 | 493 | ||
494 | /* | 494 | /* |
495 | * do_cpu_ftr_fixups - goes through the list of CPU feature fixups | 495 | * do_cpu_ftr_fixups - goes through the list of CPU feature fixups |
496 | * and writes nop's over sections of code that don't apply for this cpu. | 496 | * and writes nop's over sections of code that don't apply for this cpu. |
497 | * r3 = data offset (not changed) | 497 | * r3 = data offset (not changed) |
498 | */ | 498 | */ |
499 | _GLOBAL(do_cpu_ftr_fixups) | 499 | _GLOBAL(do_cpu_ftr_fixups) |
500 | /* Get CPU 0 features */ | 500 | /* Get CPU 0 features */ |
501 | LOADADDR(r6,cur_cpu_spec) | 501 | LOADADDR(r6,cur_cpu_spec) |
502 | sub r6,r6,r3 | 502 | sub r6,r6,r3 |
503 | ld r4,0(r6) | 503 | ld r4,0(r6) |
504 | sub r4,r4,r3 | 504 | sub r4,r4,r3 |
505 | ld r4,CPU_SPEC_FEATURES(r4) | 505 | ld r4,CPU_SPEC_FEATURES(r4) |
506 | /* Get the fixup table */ | 506 | /* Get the fixup table */ |
507 | LOADADDR(r6,__start___ftr_fixup) | 507 | LOADADDR(r6,__start___ftr_fixup) |
508 | sub r6,r6,r3 | 508 | sub r6,r6,r3 |
509 | LOADADDR(r7,__stop___ftr_fixup) | 509 | LOADADDR(r7,__stop___ftr_fixup) |
510 | sub r7,r7,r3 | 510 | sub r7,r7,r3 |
511 | /* Do the fixup */ | 511 | /* Do the fixup */ |
512 | 1: cmpld r6,r7 | 512 | 1: cmpld r6,r7 |
513 | bgelr | 513 | bgelr |
514 | addi r6,r6,32 | 514 | addi r6,r6,32 |
515 | ld r8,-32(r6) /* mask */ | 515 | ld r8,-32(r6) /* mask */ |
516 | and r8,r8,r4 | 516 | and r8,r8,r4 |
517 | ld r9,-24(r6) /* value */ | 517 | ld r9,-24(r6) /* value */ |
518 | cmpld r8,r9 | 518 | cmpld r8,r9 |
519 | beq 1b | 519 | beq 1b |
520 | ld r8,-16(r6) /* section begin */ | 520 | ld r8,-16(r6) /* section begin */ |
521 | ld r9,-8(r6) /* section end */ | 521 | ld r9,-8(r6) /* section end */ |
522 | subf. r9,r8,r9 | 522 | subf. r9,r8,r9 |
523 | beq 1b | 523 | beq 1b |
524 | /* write nops over the section of code */ | 524 | /* write nops over the section of code */ |
525 | /* todo: if large section, add a branch at the start of it */ | 525 | /* todo: if large section, add a branch at the start of it */ |
526 | srwi r9,r9,2 | 526 | srwi r9,r9,2 |
527 | mtctr r9 | 527 | mtctr r9 |
528 | sub r8,r8,r3 | 528 | sub r8,r8,r3 |
529 | lis r0,0x60000000@h /* nop */ | 529 | lis r0,0x60000000@h /* nop */ |
530 | 3: stw r0,0(r8) | 530 | 3: stw r0,0(r8) |
531 | andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l | 531 | andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l |
532 | beq 2f | 532 | beq 2f |
533 | dcbst 0,r8 /* suboptimal, but simpler */ | 533 | dcbst 0,r8 /* suboptimal, but simpler */ |
534 | sync | 534 | sync |
535 | icbi 0,r8 | 535 | icbi 0,r8 |
536 | 2: addi r8,r8,4 | 536 | 2: addi r8,r8,4 |
537 | bdnz 3b | 537 | bdnz 3b |
538 | sync /* additional sync needed on g4 */ | 538 | sync /* additional sync needed on g4 */ |
539 | isync | 539 | isync |
540 | b 1b | 540 | b 1b |
541 | 541 | ||
542 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) | 542 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) |
543 | /* | 543 | /* |
544 | * Do an IO access in real mode | 544 | * Do an IO access in real mode |
545 | */ | 545 | */ |
546 | _GLOBAL(real_readb) | 546 | _GLOBAL(real_readb) |
547 | mfmsr r7 | 547 | mfmsr r7 |
548 | ori r0,r7,MSR_DR | 548 | ori r0,r7,MSR_DR |
549 | xori r0,r0,MSR_DR | 549 | xori r0,r0,MSR_DR |
550 | sync | 550 | sync |
551 | mtmsrd r0 | 551 | mtmsrd r0 |
552 | sync | 552 | sync |
553 | isync | 553 | isync |
554 | mfspr r6,SPRN_HID4 | 554 | mfspr r6,SPRN_HID4 |
555 | rldicl r5,r6,32,0 | 555 | rldicl r5,r6,32,0 |
556 | ori r5,r5,0x100 | 556 | ori r5,r5,0x100 |
557 | rldicl r5,r5,32,0 | 557 | rldicl r5,r5,32,0 |
558 | sync | 558 | sync |
559 | mtspr SPRN_HID4,r5 | 559 | mtspr SPRN_HID4,r5 |
560 | isync | 560 | isync |
561 | slbia | 561 | slbia |
562 | isync | 562 | isync |
563 | lbz r3,0(r3) | 563 | lbz r3,0(r3) |
564 | sync | 564 | sync |
565 | mtspr SPRN_HID4,r6 | 565 | mtspr SPRN_HID4,r6 |
566 | isync | 566 | isync |
567 | slbia | 567 | slbia |
568 | isync | 568 | isync |
569 | mtmsrd r7 | 569 | mtmsrd r7 |
570 | sync | 570 | sync |
571 | isync | 571 | isync |
572 | blr | 572 | blr |
573 | 573 | ||
574 | /* | 574 | /* |
575 | * Do an IO access in real mode | 575 | * Do an IO access in real mode |
576 | */ | 576 | */ |
577 | _GLOBAL(real_writeb) | 577 | _GLOBAL(real_writeb) |
578 | mfmsr r7 | 578 | mfmsr r7 |
579 | ori r0,r7,MSR_DR | 579 | ori r0,r7,MSR_DR |
580 | xori r0,r0,MSR_DR | 580 | xori r0,r0,MSR_DR |
581 | sync | 581 | sync |
582 | mtmsrd r0 | 582 | mtmsrd r0 |
583 | sync | 583 | sync |
584 | isync | 584 | isync |
585 | mfspr r6,SPRN_HID4 | 585 | mfspr r6,SPRN_HID4 |
586 | rldicl r5,r6,32,0 | 586 | rldicl r5,r6,32,0 |
587 | ori r5,r5,0x100 | 587 | ori r5,r5,0x100 |
588 | rldicl r5,r5,32,0 | 588 | rldicl r5,r5,32,0 |
589 | sync | 589 | sync |
590 | mtspr SPRN_HID4,r5 | 590 | mtspr SPRN_HID4,r5 |
591 | isync | 591 | isync |
592 | slbia | 592 | slbia |
593 | isync | 593 | isync |
594 | stb r3,0(r4) | 594 | stb r3,0(r4) |
595 | sync | 595 | sync |
596 | mtspr SPRN_HID4,r6 | 596 | mtspr SPRN_HID4,r6 |
597 | isync | 597 | isync |
598 | slbia | 598 | slbia |
599 | isync | 599 | isync |
600 | mtmsrd r7 | 600 | mtmsrd r7 |
601 | sync | 601 | sync |
602 | isync | 602 | isync |
603 | blr | 603 | blr |
604 | #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ | 604 | #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ |
605 | 605 | ||
606 | /* | 606 | /* |
607 | * SCOM access functions for 970 (FX only for now) | 607 | * SCOM access functions for 970 (FX only for now) |
608 | * | 608 | * |
609 | * unsigned long scom970_read(unsigned int address); | 609 | * unsigned long scom970_read(unsigned int address); |
610 | * void scom970_write(unsigned int address, unsigned long value); | 610 | * void scom970_write(unsigned int address, unsigned long value); |
611 | * | 611 | * |
612 | * The address passed in is the 24 bits register address. This code | 612 | * The address passed in is the 24 bits register address. This code |
613 | * is 970 specific and will not check the status bits, so you should | 613 | * is 970 specific and will not check the status bits, so you should |
614 | * know what you are doing. | 614 | * know what you are doing. |
615 | */ | 615 | */ |
616 | _GLOBAL(scom970_read) | 616 | _GLOBAL(scom970_read) |
617 | /* interrupts off */ | 617 | /* interrupts off */ |
618 | mfmsr r4 | 618 | mfmsr r4 |
619 | ori r0,r4,MSR_EE | 619 | ori r0,r4,MSR_EE |
620 | xori r0,r0,MSR_EE | 620 | xori r0,r0,MSR_EE |
621 | mtmsrd r0,1 | 621 | mtmsrd r0,1 |
622 | 622 | ||
623 | /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits | 623 | /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits |
624 | * (including parity). On current CPUs they must be 0'd, | 624 | * (including parity). On current CPUs they must be 0'd, |
625 | * and finally or in RW bit | 625 | * and finally or in RW bit |
626 | */ | 626 | */ |
627 | rlwinm r3,r3,8,0,15 | 627 | rlwinm r3,r3,8,0,15 |
628 | ori r3,r3,0x8000 | 628 | ori r3,r3,0x8000 |
629 | 629 | ||
630 | /* do the actual scom read */ | 630 | /* do the actual scom read */ |
631 | sync | 631 | sync |
632 | mtspr SPRN_SCOMC,r3 | 632 | mtspr SPRN_SCOMC,r3 |
633 | isync | 633 | isync |
634 | mfspr r3,SPRN_SCOMD | 634 | mfspr r3,SPRN_SCOMD |
635 | isync | 635 | isync |
636 | mfspr r0,SPRN_SCOMC | 636 | mfspr r0,SPRN_SCOMC |
637 | isync | 637 | isync |
638 | 638 | ||
639 | /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah | 639 | /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah |
640 | * that's the best we can do). Not implemented yet as we don't use | 640 | * that's the best we can do). Not implemented yet as we don't use |
641 | * the scom on any of the bogus CPUs yet, but may have to be done | 641 | * the scom on any of the bogus CPUs yet, but may have to be done |
642 | * ultimately | 642 | * ultimately |
643 | */ | 643 | */ |
644 | 644 | ||
645 | /* restore interrupts */ | 645 | /* restore interrupts */ |
646 | mtmsrd r4,1 | 646 | mtmsrd r4,1 |
647 | blr | 647 | blr |
648 | 648 | ||
649 | 649 | ||
650 | _GLOBAL(scom970_write) | 650 | _GLOBAL(scom970_write) |
651 | /* interrupts off */ | 651 | /* interrupts off */ |
652 | mfmsr r5 | 652 | mfmsr r5 |
653 | ori r0,r5,MSR_EE | 653 | ori r0,r5,MSR_EE |
654 | xori r0,r0,MSR_EE | 654 | xori r0,r0,MSR_EE |
655 | mtmsrd r0,1 | 655 | mtmsrd r0,1 |
656 | 656 | ||
657 | /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits | 657 | /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits |
658 | * (including parity). On current CPUs they must be 0'd. | 658 | * (including parity). On current CPUs they must be 0'd. |
659 | */ | 659 | */ |
660 | 660 | ||
661 | rlwinm r3,r3,8,0,15 | 661 | rlwinm r3,r3,8,0,15 |
662 | 662 | ||
663 | sync | 663 | sync |
664 | mtspr SPRN_SCOMD,r4 /* write data */ | 664 | mtspr SPRN_SCOMD,r4 /* write data */ |
665 | isync | 665 | isync |
666 | mtspr SPRN_SCOMC,r3 /* write command */ | 666 | mtspr SPRN_SCOMC,r3 /* write command */ |
667 | isync | 667 | isync |
668 | mfspr 3,SPRN_SCOMC | 668 | mfspr 3,SPRN_SCOMC |
669 | isync | 669 | isync |
670 | 670 | ||
671 | /* restore interrupts */ | 671 | /* restore interrupts */ |
672 | mtmsrd r5,1 | 672 | mtmsrd r5,1 |
673 | blr | 673 | blr |
674 | 674 | ||
675 | 675 | ||
676 | /* | 676 | /* |
677 | * Create a kernel thread | 677 | * Create a kernel thread |
678 | * kernel_thread(fn, arg, flags) | 678 | * kernel_thread(fn, arg, flags) |
679 | */ | 679 | */ |
680 | _GLOBAL(kernel_thread) | 680 | _GLOBAL(kernel_thread) |
681 | std r29,-24(r1) | 681 | std r29,-24(r1) |
682 | std r30,-16(r1) | 682 | std r30,-16(r1) |
683 | stdu r1,-STACK_FRAME_OVERHEAD(r1) | 683 | stdu r1,-STACK_FRAME_OVERHEAD(r1) |
684 | mr r29,r3 | 684 | mr r29,r3 |
685 | mr r30,r4 | 685 | mr r30,r4 |
686 | ori r3,r5,CLONE_VM /* flags */ | 686 | ori r3,r5,CLONE_VM /* flags */ |
687 | oris r3,r3,(CLONE_UNTRACED>>16) | 687 | oris r3,r3,(CLONE_UNTRACED>>16) |
688 | li r4,0 /* new sp (unused) */ | 688 | li r4,0 /* new sp (unused) */ |
689 | li r0,__NR_clone | 689 | li r0,__NR_clone |
690 | sc | 690 | sc |
691 | cmpdi 0,r3,0 /* parent or child? */ | 691 | cmpdi 0,r3,0 /* parent or child? */ |
692 | bne 1f /* return if parent */ | 692 | bne 1f /* return if parent */ |
693 | li r0,0 | 693 | li r0,0 |
694 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | 694 | stdu r0,-STACK_FRAME_OVERHEAD(r1) |
695 | ld r2,8(r29) | 695 | ld r2,8(r29) |
696 | ld r29,0(r29) | 696 | ld r29,0(r29) |
697 | mtlr r29 /* fn addr in lr */ | 697 | mtlr r29 /* fn addr in lr */ |
698 | mr r3,r30 /* load arg and call fn */ | 698 | mr r3,r30 /* load arg and call fn */ |
699 | blrl | 699 | blrl |
700 | li r0,__NR_exit /* exit after child exits */ | 700 | li r0,__NR_exit /* exit after child exits */ |
701 | li r3,0 | 701 | li r3,0 |
702 | sc | 702 | sc |
703 | 1: addi r1,r1,STACK_FRAME_OVERHEAD | 703 | 1: addi r1,r1,STACK_FRAME_OVERHEAD |
704 | ld r29,-24(r1) | 704 | ld r29,-24(r1) |
705 | ld r30,-16(r1) | 705 | ld r30,-16(r1) |
706 | blr | 706 | blr |
707 | 707 | ||
708 | /* | 708 | /* |
709 | * disable_kernel_fp() | 709 | * disable_kernel_fp() |
710 | * Disable the FPU. | 710 | * Disable the FPU. |
711 | */ | 711 | */ |
712 | _GLOBAL(disable_kernel_fp) | 712 | _GLOBAL(disable_kernel_fp) |
713 | mfmsr r3 | 713 | mfmsr r3 |
714 | rldicl r0,r3,(63-MSR_FP_LG),1 | 714 | rldicl r0,r3,(63-MSR_FP_LG),1 |
715 | rldicl r3,r0,(MSR_FP_LG+1),0 | 715 | rldicl r3,r0,(MSR_FP_LG+1),0 |
716 | mtmsrd r3 /* disable use of fpu now */ | 716 | mtmsrd r3 /* disable use of fpu now */ |
717 | isync | 717 | isync |
718 | blr | 718 | blr |
719 | 719 | ||
720 | #ifdef CONFIG_ALTIVEC | 720 | #ifdef CONFIG_ALTIVEC |
721 | 721 | ||
722 | #if 0 /* this has no callers for now */ | 722 | #if 0 /* this has no callers for now */ |
723 | /* | 723 | /* |
724 | * disable_kernel_altivec() | 724 | * disable_kernel_altivec() |
725 | * Disable the VMX. | 725 | * Disable the VMX. |
726 | */ | 726 | */ |
727 | _GLOBAL(disable_kernel_altivec) | 727 | _GLOBAL(disable_kernel_altivec) |
728 | mfmsr r3 | 728 | mfmsr r3 |
729 | rldicl r0,r3,(63-MSR_VEC_LG),1 | 729 | rldicl r0,r3,(63-MSR_VEC_LG),1 |
730 | rldicl r3,r0,(MSR_VEC_LG+1),0 | 730 | rldicl r3,r0,(MSR_VEC_LG+1),0 |
731 | mtmsrd r3 /* disable use of VMX now */ | 731 | mtmsrd r3 /* disable use of VMX now */ |
732 | isync | 732 | isync |
733 | blr | 733 | blr |
734 | #endif /* 0 */ | 734 | #endif /* 0 */ |
735 | 735 | ||
736 | /* | 736 | /* |
737 | * giveup_altivec(tsk) | 737 | * giveup_altivec(tsk) |
738 | * Disable VMX for the task given as the argument, | 738 | * Disable VMX for the task given as the argument, |
739 | * and save the vector registers in its thread_struct. | 739 | * and save the vector registers in its thread_struct. |
740 | * Enables the VMX for use in the kernel on return. | 740 | * Enables the VMX for use in the kernel on return. |
741 | */ | 741 | */ |
742 | _GLOBAL(giveup_altivec) | 742 | _GLOBAL(giveup_altivec) |
743 | mfmsr r5 | 743 | mfmsr r5 |
744 | oris r5,r5,MSR_VEC@h | 744 | oris r5,r5,MSR_VEC@h |
745 | mtmsrd r5 /* enable use of VMX now */ | 745 | mtmsrd r5 /* enable use of VMX now */ |
746 | isync | 746 | isync |
747 | cmpdi 0,r3,0 | 747 | cmpdi 0,r3,0 |
748 | beqlr- /* if no previous owner, done */ | 748 | beqlr- /* if no previous owner, done */ |
749 | addi r3,r3,THREAD /* want THREAD of task */ | 749 | addi r3,r3,THREAD /* want THREAD of task */ |
750 | ld r5,PT_REGS(r3) | 750 | ld r5,PT_REGS(r3) |
751 | cmpdi 0,r5,0 | 751 | cmpdi 0,r5,0 |
752 | SAVE_32VRS(0,r4,r3) | 752 | SAVE_32VRS(0,r4,r3) |
753 | mfvscr vr0 | 753 | mfvscr vr0 |
754 | li r4,THREAD_VSCR | 754 | li r4,THREAD_VSCR |
755 | stvx vr0,r4,r3 | 755 | stvx vr0,r4,r3 |
756 | beq 1f | 756 | beq 1f |
757 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | 757 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
758 | lis r3,MSR_VEC@h | 758 | lis r3,MSR_VEC@h |
759 | andc r4,r4,r3 /* disable FP for previous task */ | 759 | andc r4,r4,r3 /* disable FP for previous task */ |
760 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | 760 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
761 | 1: | 761 | 1: |
762 | #ifndef CONFIG_SMP | 762 | #ifndef CONFIG_SMP |
763 | li r5,0 | 763 | li r5,0 |
764 | ld r4,last_task_used_altivec@got(r2) | 764 | ld r4,last_task_used_altivec@got(r2) |
765 | std r5,0(r4) | 765 | std r5,0(r4) |
766 | #endif /* CONFIG_SMP */ | 766 | #endif /* CONFIG_SMP */ |
767 | blr | 767 | blr |
768 | 768 | ||
769 | #endif /* CONFIG_ALTIVEC */ | 769 | #endif /* CONFIG_ALTIVEC */ |
770 | 770 | ||
771 | _GLOBAL(__setup_cpu_power3) | 771 | _GLOBAL(__setup_cpu_power3) |
772 | blr | 772 | blr |
773 | 773 | ||
774 | _GLOBAL(execve) | 774 | _GLOBAL(execve) |
775 | li r0,__NR_execve | 775 | li r0,__NR_execve |
776 | sc | 776 | sc |
777 | bnslr | 777 | bnslr |
778 | neg r3,r3 | 778 | neg r3,r3 |
779 | blr | 779 | blr |
780 | 780 | ||
781 | /* kexec_wait(phys_cpu) | 781 | /* kexec_wait(phys_cpu) |
782 | * | 782 | * |
783 | * wait for the flag to change, indicating this kernel is going away but | 783 | * wait for the flag to change, indicating this kernel is going away but |
784 | * the slave code for the next one is at addresses 0 to 100. | 784 | * the slave code for the next one is at addresses 0 to 100. |
785 | * | 785 | * |
786 | * This is used by all slaves. | 786 | * This is used by all slaves. |
787 | * | 787 | * |
788 | * Physical (hardware) cpu id should be in r3. | 788 | * Physical (hardware) cpu id should be in r3. |
789 | */ | 789 | */ |
790 | _GLOBAL(kexec_wait) | 790 | _GLOBAL(kexec_wait) |
791 | bl 1f | 791 | bl 1f |
792 | 1: mflr r5 | 792 | 1: mflr r5 |
793 | addi r5,r5,kexec_flag-1b | 793 | addi r5,r5,kexec_flag-1b |
794 | 794 | ||
795 | 99: HMT_LOW | 795 | 99: HMT_LOW |
796 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ | 796 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ |
797 | lwz r4,0(r5) | 797 | lwz r4,0(r5) |
798 | cmpwi 0,r4,0 | 798 | cmpwi 0,r4,0 |
799 | bnea 0x60 | 799 | bnea 0x60 |
800 | #endif | 800 | #endif |
801 | b 99b | 801 | b 99b |
802 | 802 | ||
803 | /* this can be in text because we won't change it until we are | 803 | /* this can be in text because we won't change it until we are |
804 | * running in real anyways | 804 | * running in real anyways |
805 | */ | 805 | */ |
806 | kexec_flag: | 806 | kexec_flag: |
807 | .long 0 | 807 | .long 0 |
808 | 808 | ||
809 | 809 | ||
810 | #ifdef CONFIG_KEXEC | 810 | #ifdef CONFIG_KEXEC |
811 | 811 | ||
812 | /* kexec_smp_wait(void) | 812 | /* kexec_smp_wait(void) |
813 | * | 813 | * |
814 | * call with interrupts off | 814 | * call with interrupts off |
815 | * note: this is a terminal routine, it does not save lr | 815 | * note: this is a terminal routine, it does not save lr |
816 | * | 816 | * |
817 | * get phys id from paca | 817 | * get phys id from paca |
818 | * set paca id to -1 to say we got here | 818 | * set paca id to -1 to say we got here |
819 | * switch to real mode | 819 | * switch to real mode |
820 | * join other cpus in kexec_wait(phys_id) | 820 | * join other cpus in kexec_wait(phys_id) |
821 | */ | 821 | */ |
822 | _GLOBAL(kexec_smp_wait) | 822 | _GLOBAL(kexec_smp_wait) |
823 | lhz r3,PACAHWCPUID(r13) | 823 | lhz r3,PACAHWCPUID(r13) |
824 | li r4,-1 | 824 | li r4,-1 |
825 | sth r4,PACAHWCPUID(r13) /* let others know we left */ | 825 | sth r4,PACAHWCPUID(r13) /* let others know we left */ |
826 | bl real_mode | 826 | bl real_mode |
827 | b .kexec_wait | 827 | b .kexec_wait |
828 | 828 | ||
829 | /* | 829 | /* |
830 | * switch to real mode (turn mmu off) | 830 | * switch to real mode (turn mmu off) |
831 | * we use the early kernel trick that the hardware ignores bits | 831 | * we use the early kernel trick that the hardware ignores bits |
832 | * 0 and 1 (big endian) of the effective address in real mode | 832 | * 0 and 1 (big endian) of the effective address in real mode |
833 | * | 833 | * |
834 | * don't overwrite r3 here, it is live for kexec_wait above. | 834 | * don't overwrite r3 here, it is live for kexec_wait above. |
835 | */ | 835 | */ |
836 | real_mode: /* assume normal blr return */ | 836 | real_mode: /* assume normal blr return */ |
837 | 1: li r9,MSR_RI | 837 | 1: li r9,MSR_RI |
838 | li r10,MSR_DR|MSR_IR | 838 | li r10,MSR_DR|MSR_IR |
839 | mflr r11 /* return address to SRR0 */ | 839 | mflr r11 /* return address to SRR0 */ |
840 | mfmsr r12 | 840 | mfmsr r12 |
841 | andc r9,r12,r9 | 841 | andc r9,r12,r9 |
842 | andc r10,r12,r10 | 842 | andc r10,r12,r10 |
843 | 843 | ||
844 | mtmsrd r9,1 | 844 | mtmsrd r9,1 |
845 | mtspr SPRN_SRR1,r10 | 845 | mtspr SPRN_SRR1,r10 |
846 | mtspr SPRN_SRR0,r11 | 846 | mtspr SPRN_SRR0,r11 |
847 | rfid | 847 | rfid |
848 | 848 | ||
849 | 849 | ||
850 | /* | 850 | /* |
851 | * kexec_sequence(newstack, start, image, control, clear_all()) | 851 | * kexec_sequence(newstack, start, image, control, clear_all()) |
852 | * | 852 | * |
853 | * does the grungy work with stack switching and real mode switches | 853 | * does the grungy work with stack switching and real mode switches |
854 | * also does simple calls to other code | 854 | * also does simple calls to other code |
855 | */ | 855 | */ |
856 | 856 | ||
857 | _GLOBAL(kexec_sequence) | 857 | _GLOBAL(kexec_sequence) |
858 | mflr r0 | 858 | mflr r0 |
859 | std r0,16(r1) | 859 | std r0,16(r1) |
860 | 860 | ||
861 | /* switch stacks to newstack -- &kexec_stack.stack */ | 861 | /* switch stacks to newstack -- &kexec_stack.stack */ |
862 | stdu r1,THREAD_SIZE-112(r3) | 862 | stdu r1,THREAD_SIZE-112(r3) |
863 | mr r1,r3 | 863 | mr r1,r3 |
864 | 864 | ||
865 | li r0,0 | 865 | li r0,0 |
866 | std r0,16(r1) | 866 | std r0,16(r1) |
867 | 867 | ||
868 | /* save regs for local vars on new stack. | 868 | /* save regs for local vars on new stack. |
869 | * yes, we won't go back, but ... | 869 | * yes, we won't go back, but ... |
870 | */ | 870 | */ |
871 | std r31,-8(r1) | 871 | std r31,-8(r1) |
872 | std r30,-16(r1) | 872 | std r30,-16(r1) |
873 | std r29,-24(r1) | 873 | std r29,-24(r1) |
874 | std r28,-32(r1) | 874 | std r28,-32(r1) |
875 | std r27,-40(r1) | 875 | std r27,-40(r1) |
876 | std r26,-48(r1) | 876 | std r26,-48(r1) |
877 | std r25,-56(r1) | 877 | std r25,-56(r1) |
878 | 878 | ||
879 | stdu r1,-112-64(r1) | 879 | stdu r1,-112-64(r1) |
880 | 880 | ||
881 | /* save args into preserved regs */ | 881 | /* save args into preserved regs */ |
882 | mr r31,r3 /* newstack (both) */ | 882 | mr r31,r3 /* newstack (both) */ |
883 | mr r30,r4 /* start (real) */ | 883 | mr r30,r4 /* start (real) */ |
884 | mr r29,r5 /* image (virt) */ | 884 | mr r29,r5 /* image (virt) */ |
885 | mr r28,r6 /* control, unused */ | 885 | mr r28,r6 /* control, unused */ |
886 | mr r27,r7 /* clear_all() fn desc */ | 886 | mr r27,r7 /* clear_all() fn desc */ |
887 | mr r26,r8 /* spare */ | 887 | mr r26,r8 /* spare */ |
888 | lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ | 888 | lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ |
889 | 889 | ||
890 | /* disable interrupts, we are overwriting kernel data next */ | 890 | /* disable interrupts, we are overwriting kernel data next */ |
891 | mfmsr r3 | 891 | mfmsr r3 |
892 | rlwinm r3,r3,0,17,15 | 892 | rlwinm r3,r3,0,17,15 |
893 | mtmsrd r3,1 | 893 | mtmsrd r3,1 |
894 | 894 | ||
895 | /* copy dest pages, flush whole dest image */ | 895 | /* copy dest pages, flush whole dest image */ |
896 | mr r3,r29 | 896 | mr r3,r29 |
897 | bl .kexec_copy_flush /* (image) */ | 897 | bl .kexec_copy_flush /* (image) */ |
898 | 898 | ||
899 | /* turn off mmu */ | 899 | /* turn off mmu */ |
900 | bl real_mode | 900 | bl real_mode |
901 | 901 | ||
902 | /* clear out hardware hash page table and tlb */ | 902 | /* clear out hardware hash page table and tlb */ |
903 | ld r5,0(r27) /* deref function descriptor */ | 903 | ld r5,0(r27) /* deref function descriptor */ |
904 | mtctr r5 | 904 | mtctr r5 |
905 | bctrl /* ppc_md.hash_clear_all(void); */ | 905 | bctrl /* ppc_md.hash_clear_all(void); */ |
906 | 906 | ||
907 | /* | 907 | /* |
908 | * kexec image calling is: | 908 | * kexec image calling is: |
909 | * the first 0x100 bytes of the entry point are copied to 0 | 909 | * the first 0x100 bytes of the entry point are copied to 0 |
910 | * | 910 | * |
911 | * all slaves branch to slave = 0x60 (absolute) | 911 | * all slaves branch to slave = 0x60 (absolute) |
912 | * slave(phys_cpu_id); | 912 | * slave(phys_cpu_id); |
913 | * | 913 | * |
914 | * master goes to start = entry point | 914 | * master goes to start = entry point |
915 | * start(phys_cpu_id, start, 0); | 915 | * start(phys_cpu_id, start, 0); |
916 | * | 916 | * |
917 | * | 917 | * |
918 | * a wrapper is needed to call existing kernels, here is an approximate | 918 | * a wrapper is needed to call existing kernels, here is an approximate |
919 | * description of one method: | 919 | * description of one method: |
920 | * | 920 | * |
921 | * v2: (2.6.10) | 921 | * v2: (2.6.10) |
922 | * start will be near the boot_block (maybe 0x100 bytes before it?) | 922 | * start will be near the boot_block (maybe 0x100 bytes before it?) |
923 | * it will have a 0x60, which will b to boot_block, where it will wait | 923 | * it will have a 0x60, which will b to boot_block, where it will wait |
924 | * and 0 will store phys into struct boot-block and load r3 from there, | 924 | * and 0 will store phys into struct boot-block and load r3 from there, |
925 | * copy kernel 0-0x100 and tell slaves to back down to 0x60 again | 925 | * copy kernel 0-0x100 and tell slaves to back down to 0x60 again |
926 | * | 926 | * |
927 | * v1: (2.6.9) | 927 | * v1: (2.6.9) |
928 | * boot block will have all cpus scanning device tree to see if they | 928 | * boot block will have all cpus scanning device tree to see if they |
929 | * are the boot cpu ????? | 929 | * are the boot cpu ????? |
930 | * other device tree differences (prop sizes, va vs pa, etc)... | 930 | * other device tree differences (prop sizes, va vs pa, etc)... |
931 | */ | 931 | */ |
932 | 932 | ||
933 | /* copy 0x100 bytes starting at start to 0 */ | 933 | /* copy 0x100 bytes starting at start to 0 */ |
934 | li r3,0 | 934 | li r3,0 |
935 | mr r4,r30 | 935 | mr r4,r30 |
936 | li r5,0x100 | 936 | li r5,0x100 |
937 | li r6,0 | 937 | li r6,0 |
938 | bl .copy_and_flush /* (dest, src, copy limit, start offset) */ | 938 | bl .copy_and_flush /* (dest, src, copy limit, start offset) */ |
939 | 1: /* assume normal blr return */ | 939 | 1: /* assume normal blr return */ |
940 | 940 | ||
941 | /* release other cpus to the new kernel secondary start at 0x60 */ | 941 | /* release other cpus to the new kernel secondary start at 0x60 */ |
942 | mflr r5 | 942 | mflr r5 |
943 | li r6,1 | 943 | li r6,1 |
944 | stw r6,kexec_flag-1b(5) | 944 | stw r6,kexec_flag-1b(5) |
945 | mr r3,r25 # my phys cpu | 945 | mr r3,r25 # my phys cpu |
946 | mr r4,r30 # start, aka phys mem offset | 946 | mr r4,r30 # start, aka phys mem offset |
947 | mtlr 4 | 947 | mtlr 4 |
948 | li r5,0 | 948 | li r5,0 |
949 | blr /* image->start(physid, image->start, 0); */ | 949 | blr /* image->start(physid, image->start, 0); */ |
950 | #endif /* CONFIG_KEXEC */ | 950 | #endif /* CONFIG_KEXEC */ |
951 | 951 |
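The renamed trampoline above (call___do_IRQ, with r3 = irq, r4 = regs, r5 = the IRQ stack's thread_info) exists so the C dispatch path can run __do_IRQ on the dedicated hardirq stack, exactly the pattern the iSeries intReceived() below uses. A sketch of the calling side, assuming CONFIG_IRQSTACKS; hardirq_ctx[] and current_thread_info() are the kernel's own, while the wrapper name handle_one_irq is illustrative:

    /* Sketch, not the literal patched code: dispatch one interrupt,
     * switching to the per-cpu IRQ stack if we are not already on it. */
    static void handle_one_irq(unsigned int irq, struct pt_regs *regs)
    {
    #ifdef CONFIG_IRQSTACKS
            struct thread_info *curtp = current_thread_info();
            struct thread_info *irqtp = hardirq_ctx[smp_processor_id()];

            if (curtp != irqtp) {                   /* not on the IRQ stack yet */
                    irqtp->task = curtp->task;
                    call___do_IRQ(irq, regs, irqtp); /* asm stack switch above */
                    irqtp->task = NULL;
                    return;
            }
    #endif
            __do_IRQ(irq, regs);    /* generic flow; finishes with ->end(irq) */
    }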
arch/powerpc/platforms/iseries/irq.c
1 | /* | 1 | /* |
2 | * This module supports the iSeries PCI bus interrupt handling | 2 | * This module supports the iSeries PCI bus interrupt handling |
3 | * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp> | 3 | * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp> |
4 | * Copyright (C) 2004-2005 IBM Corporation | 4 | * Copyright (C) 2004-2005 IBM Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the: | 17 | * along with this program; if not, write to the: |
18 | * Free Software Foundation, Inc., | 18 | * Free Software Foundation, Inc., |
19 | * 59 Temple Place, Suite 330, | 19 | * 59 Temple Place, Suite 330, |
20 | * Boston, MA 02111-1307 USA | 20 | * Boston, MA 02111-1307 USA |
21 | * | 21 | * |
22 | * Change Activity: | 22 | * Change Activity: |
23 | * Created, December 13, 2000 by Wayne Holm | 23 | * Created, December 13, 2000 by Wayne Holm |
24 | * End Change Activity | 24 | * End Change Activity |
25 | */ | 25 | */ |
26 | #include <linux/config.h> | 26 | #include <linux/config.h> |
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/threads.h> | 29 | #include <linux/threads.h> |
30 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
31 | #include <linux/param.h> | 31 | #include <linux/param.h> |
32 | #include <linux/string.h> | 32 | #include <linux/string.h> |
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | #include <linux/ide.h> | 34 | #include <linux/ide.h> |
35 | #include <linux/irq.h> | 35 | #include <linux/irq.h> |
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | 37 | ||
38 | #include <asm/iseries/hv_types.h> | 38 | #include <asm/iseries/hv_types.h> |
39 | #include <asm/iseries/hv_lp_event.h> | 39 | #include <asm/iseries/hv_lp_event.h> |
40 | #include <asm/iseries/hv_call_xm.h> | 40 | #include <asm/iseries/hv_call_xm.h> |
41 | 41 | ||
42 | #include "irq.h" | 42 | #include "irq.h" |
43 | #include "call_pci.h" | 43 | #include "call_pci.h" |
44 | 44 | ||
45 | /* This maps virtual irq numbers to real irqs */ | 45 | /* This maps virtual irq numbers to real irqs */ |
46 | unsigned int virt_irq_to_real_map[NR_IRQS]; | 46 | unsigned int virt_irq_to_real_map[NR_IRQS]; |
47 | 47 | ||
48 | /* The next available virtual irq number */ | 48 | /* The next available virtual irq number */ |
49 | /* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */ | 49 | /* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */ |
50 | static int next_virtual_irq = 2; | 50 | static int next_virtual_irq = 2; |
51 | 51 | ||
52 | static long Pci_Interrupt_Count; | 52 | static long Pci_Interrupt_Count; |
53 | static long Pci_Event_Count; | 53 | static long Pci_Event_Count; |
54 | 54 | ||
55 | enum XmPciLpEvent_Subtype { | 55 | enum XmPciLpEvent_Subtype { |
56 | XmPciLpEvent_BusCreated = 0, // PHB has been created | 56 | XmPciLpEvent_BusCreated = 0, // PHB has been created |
57 | XmPciLpEvent_BusError = 1, // PHB has failed | 57 | XmPciLpEvent_BusError = 1, // PHB has failed |
58 | XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus | 58 | XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus |
59 | XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed | 59 | XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed |
60 | XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered | 60 | XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered |
61 | XmPciLpEvent_BusRecovered = 12, // PHB has been recovered | 61 | XmPciLpEvent_BusRecovered = 12, // PHB has been recovered |
62 | XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unquiescing | 62 | XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unquiescing |
63 | XmPciLpEvent_BridgeError = 21, // Bridge Error | 63 | XmPciLpEvent_BridgeError = 21, // Bridge Error |
64 | XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt | 64 | XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct XmPciLpEvent_BusInterrupt { | 67 | struct XmPciLpEvent_BusInterrupt { |
68 | HvBusNumber busNumber; | 68 | HvBusNumber busNumber; |
69 | HvSubBusNumber subBusNumber; | 69 | HvSubBusNumber subBusNumber; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | struct XmPciLpEvent_NodeInterrupt { | 72 | struct XmPciLpEvent_NodeInterrupt { |
73 | HvBusNumber busNumber; | 73 | HvBusNumber busNumber; |
74 | HvSubBusNumber subBusNumber; | 74 | HvSubBusNumber subBusNumber; |
75 | HvAgentId deviceId; | 75 | HvAgentId deviceId; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | struct XmPciLpEvent { | 78 | struct XmPciLpEvent { |
79 | struct HvLpEvent hvLpEvent; | 79 | struct HvLpEvent hvLpEvent; |
80 | 80 | ||
81 | union { | 81 | union { |
82 | u64 alignData; // Align on an 8-byte boundary | 82 | u64 alignData; // Align on an 8-byte boundary |
83 | 83 | ||
84 | struct { | 84 | struct { |
85 | u32 fisr; | 85 | u32 fisr; |
86 | HvBusNumber busNumber; | 86 | HvBusNumber busNumber; |
87 | HvSubBusNumber subBusNumber; | 87 | HvSubBusNumber subBusNumber; |
88 | HvAgentId deviceId; | 88 | HvAgentId deviceId; |
89 | } slotInterrupt; | 89 | } slotInterrupt; |
90 | 90 | ||
91 | struct XmPciLpEvent_BusInterrupt busFailed; | 91 | struct XmPciLpEvent_BusInterrupt busFailed; |
92 | struct XmPciLpEvent_BusInterrupt busRecovered; | 92 | struct XmPciLpEvent_BusInterrupt busRecovered; |
93 | struct XmPciLpEvent_BusInterrupt busCreated; | 93 | struct XmPciLpEvent_BusInterrupt busCreated; |
94 | 94 | ||
95 | struct XmPciLpEvent_NodeInterrupt nodeFailed; | 95 | struct XmPciLpEvent_NodeInterrupt nodeFailed; |
96 | struct XmPciLpEvent_NodeInterrupt nodeRecovered; | 96 | struct XmPciLpEvent_NodeInterrupt nodeRecovered; |
97 | 97 | ||
98 | } eventData; | 98 | } eventData; |
99 | 99 | ||
100 | }; | 100 | }; |
101 | 101 | ||
102 | static void intReceived(struct XmPciLpEvent *eventParm, | 102 | static void intReceived(struct XmPciLpEvent *eventParm, |
103 | struct pt_regs *regsParm) | 103 | struct pt_regs *regsParm) |
104 | { | 104 | { |
105 | int irq; | 105 | int irq; |
106 | #ifdef CONFIG_IRQSTACKS | 106 | #ifdef CONFIG_IRQSTACKS |
107 | struct thread_info *curtp, *irqtp; | 107 | struct thread_info *curtp, *irqtp; |
108 | #endif | 108 | #endif |
109 | 109 | ||
110 | ++Pci_Interrupt_Count; | 110 | ++Pci_Interrupt_Count; |
111 | 111 | ||
112 | switch (eventParm->hvLpEvent.xSubtype) { | 112 | switch (eventParm->hvLpEvent.xSubtype) { |
113 | case XmPciLpEvent_SlotInterrupt: | 113 | case XmPciLpEvent_SlotInterrupt: |
114 | irq = eventParm->hvLpEvent.xCorrelationToken; | 114 | irq = eventParm->hvLpEvent.xCorrelationToken; |
115 | /* Dispatch the interrupt handlers for this irq */ | 115 | /* Dispatch the interrupt handlers for this irq */ |
116 | #ifdef CONFIG_IRQSTACKS | 116 | #ifdef CONFIG_IRQSTACKS |
117 | /* Switch to the irq stack to handle this */ | 117 | /* Switch to the irq stack to handle this */ |
118 | curtp = current_thread_info(); | 118 | curtp = current_thread_info(); |
119 | irqtp = hardirq_ctx[smp_processor_id()]; | 119 | irqtp = hardirq_ctx[smp_processor_id()]; |
120 | if (curtp != irqtp) { | 120 | if (curtp != irqtp) { |
121 | irqtp->task = curtp->task; | 121 | irqtp->task = curtp->task; |
122 | irqtp->flags = 0; | 122 | irqtp->flags = 0; |
123 | call_ppc_irq_dispatch_handler(regsParm, irq, irqtp); | 123 | call___do_IRQ(irq, regsParm, irqtp); |
124 | irqtp->task = NULL; | 124 | irqtp->task = NULL; |
125 | if (irqtp->flags) | 125 | if (irqtp->flags) |
126 | set_bits(irqtp->flags, &curtp->flags); | 126 | set_bits(irqtp->flags, &curtp->flags); |
127 | } else | 127 | } else |
128 | #endif | 128 | #endif |
129 | ppc_irq_dispatch_handler(regsParm, irq); | 129 | __do_IRQ(irq, regsParm); |
130 | HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber, | 130 | HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber, |
131 | eventParm->eventData.slotInterrupt.subBusNumber, | 131 | eventParm->eventData.slotInterrupt.subBusNumber, |
132 | eventParm->eventData.slotInterrupt.deviceId); | 132 | eventParm->eventData.slotInterrupt.deviceId); |
133 | break; | 133 | break; |
134 | /* Ignore error recovery events for now */ | 134 | /* Ignore error recovery events for now */ |
135 | case XmPciLpEvent_BusCreated: | 135 | case XmPciLpEvent_BusCreated: |
136 | printk(KERN_INFO "intReceived: system bus %d created\n", | 136 | printk(KERN_INFO "intReceived: system bus %d created\n", |
137 | eventParm->eventData.busCreated.busNumber); | 137 | eventParm->eventData.busCreated.busNumber); |
138 | break; | 138 | break; |
139 | case XmPciLpEvent_BusError: | 139 | case XmPciLpEvent_BusError: |
140 | case XmPciLpEvent_BusFailed: | 140 | case XmPciLpEvent_BusFailed: |
141 | printk(KERN_INFO "intReceived: system bus %d failed\n", | 141 | printk(KERN_INFO "intReceived: system bus %d failed\n", |
142 | eventParm->eventData.busFailed.busNumber); | 142 | eventParm->eventData.busFailed.busNumber); |
143 | break; | 143 | break; |
144 | case XmPciLpEvent_BusRecovered: | 144 | case XmPciLpEvent_BusRecovered: |
145 | case XmPciLpEvent_UnQuiesceBus: | 145 | case XmPciLpEvent_UnQuiesceBus: |
146 | printk(KERN_INFO "intReceived: system bus %d recovered\n", | 146 | printk(KERN_INFO "intReceived: system bus %d recovered\n", |
147 | eventParm->eventData.busRecovered.busNumber); | 147 | eventParm->eventData.busRecovered.busNumber); |
148 | break; | 148 | break; |
149 | case XmPciLpEvent_NodeFailed: | 149 | case XmPciLpEvent_NodeFailed: |
150 | case XmPciLpEvent_BridgeError: | 150 | case XmPciLpEvent_BridgeError: |
151 | printk(KERN_INFO | 151 | printk(KERN_INFO |
152 | "intReceived: multi-adapter bridge %d/%d/%d failed\n", | 152 | "intReceived: multi-adapter bridge %d/%d/%d failed\n", |
153 | eventParm->eventData.nodeFailed.busNumber, | 153 | eventParm->eventData.nodeFailed.busNumber, |
154 | eventParm->eventData.nodeFailed.subBusNumber, | 154 | eventParm->eventData.nodeFailed.subBusNumber, |
155 | eventParm->eventData.nodeFailed.deviceId); | 155 | eventParm->eventData.nodeFailed.deviceId); |
156 | break; | 156 | break; |
157 | case XmPciLpEvent_NodeRecovered: | 157 | case XmPciLpEvent_NodeRecovered: |
158 | printk(KERN_INFO | 158 | printk(KERN_INFO |
159 | "intReceived: multi-adapter bridge %d/%d/%d recovered\n", | 159 | "intReceived: multi-adapter bridge %d/%d/%d recovered\n", |
160 | eventParm->eventData.nodeRecovered.busNumber, | 160 | eventParm->eventData.nodeRecovered.busNumber, |
161 | eventParm->eventData.nodeRecovered.subBusNumber, | 161 | eventParm->eventData.nodeRecovered.subBusNumber, |
162 | eventParm->eventData.nodeRecovered.deviceId); | 162 | eventParm->eventData.nodeRecovered.deviceId); |
163 | break; | 163 | break; |
164 | default: | 164 | default: |
165 | printk(KERN_ERR | 165 | printk(KERN_ERR |
166 | "intReceived: unrecognized event subtype 0x%x\n", | 166 | "intReceived: unrecognized event subtype 0x%x\n", |
167 | eventParm->hvLpEvent.xSubtype); | 167 | eventParm->hvLpEvent.xSubtype); |
168 | break; | 168 | break; |
169 | } | 169 | } |
170 | } | 170 | } |
171 | 171 | ||
172 | static void XmPciLpEvent_handler(struct HvLpEvent *eventParm, | 172 | static void XmPciLpEvent_handler(struct HvLpEvent *eventParm, |
173 | struct pt_regs *regsParm) | 173 | struct pt_regs *regsParm) |
174 | { | 174 | { |
175 | #ifdef CONFIG_PCI | 175 | #ifdef CONFIG_PCI |
176 | ++Pci_Event_Count; | 176 | ++Pci_Event_Count; |
177 | 177 | ||
178 | if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) { | 178 | if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) { |
179 | switch (eventParm->xFlags.xFunction) { | 179 | switch (eventParm->xFlags.xFunction) { |
180 | case HvLpEvent_Function_Int: | 180 | case HvLpEvent_Function_Int: |
181 | intReceived((struct XmPciLpEvent *)eventParm, regsParm); | 181 | intReceived((struct XmPciLpEvent *)eventParm, regsParm); |
182 | break; | 182 | break; |
183 | case HvLpEvent_Function_Ack: | 183 | case HvLpEvent_Function_Ack: |
184 | printk(KERN_ERR | 184 | printk(KERN_ERR |
185 | "XmPciLpEvent_handler: unexpected ack received\n"); | 185 | "XmPciLpEvent_handler: unexpected ack received\n"); |
186 | break; | 186 | break; |
187 | default: | 187 | default: |
188 | printk(KERN_ERR | 188 | printk(KERN_ERR |
189 | "XmPciLpEvent_handler: unexpected event function %d\n", | 189 | "XmPciLpEvent_handler: unexpected event function %d\n", |
190 | (int)eventParm->xFlags.xFunction); | 190 | (int)eventParm->xFlags.xFunction); |
191 | break; | 191 | break; |
192 | } | 192 | } |
193 | } else if (eventParm) | 193 | } else if (eventParm) |
194 | printk(KERN_ERR | 194 | printk(KERN_ERR |
195 | "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n", | 195 | "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n", |
196 | (int)eventParm->xType); | 196 | (int)eventParm->xType); |
197 | else | 197 | else |
198 | printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n"); | 198 | printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n"); |
199 | #endif | 199 | #endif |
200 | } | 200 | } |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * This is called by init_IRQ; it is installed as ppc_md.init_IRQ by iSeries_setup.c. | 203 | * This is called by init_IRQ; it is installed as ppc_md.init_IRQ by iSeries_setup.c. |
204 | * It must be called before the bus walk. | 204 | * It must be called before the bus walk. |
205 | */ | 205 | */ |
206 | void __init iSeries_init_IRQ(void) | 206 | void __init iSeries_init_IRQ(void) |
207 | { | 207 | { |
208 | /* Register PCI event handler and open an event path */ | 208 | /* Register PCI event handler and open an event path */ |
209 | int xRc; | 209 | int xRc; |
210 | 210 | ||
211 | xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | 211 | xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, |
212 | &XmPciLpEvent_handler); | 212 | &XmPciLpEvent_handler); |
213 | if (xRc == 0) { | 213 | if (xRc == 0) { |
214 | xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | 214 | xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); |
215 | if (xRc != 0) | 215 | if (xRc != 0) |
216 | printk(KERN_ERR "iSeries_init_IRQ: open event path " | 216 | printk(KERN_ERR "iSeries_init_IRQ: open event path " |
217 | "failed with rc 0x%x\n", xRc); | 217 | "failed with rc 0x%x\n", xRc); |
218 | } else | 218 | } else |
219 | printk(KERN_ERR "iSeries_init_IRQ: register handler " | 219 | printk(KERN_ERR "iSeries_init_IRQ: register handler " |
220 | "failed with rc 0x%x\n", xRc); | 220 | "failed with rc 0x%x\n", xRc); |
221 | } | 221 | } |
222 | 222 | ||
223 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) | 223 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) |
224 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) | 224 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) |
225 | #define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) | 225 | #define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) |
226 | 226 | ||
227 | /* | 227 | /* |
228 | * This will be called by device drivers (via enable_IRQ) | 228 | * This will be called by device drivers (via enable_IRQ) |
229 | * to enable INTA in the bridge interrupt status register. | 229 | * to enable INTA in the bridge interrupt status register. |
230 | */ | 230 | */ |
231 | static void iSeries_enable_IRQ(unsigned int irq) | 231 | static void iSeries_enable_IRQ(unsigned int irq) |
232 | { | 232 | { |
233 | u32 bus, deviceId, function, mask; | 233 | u32 bus, deviceId, function, mask; |
234 | const u32 subBus = 0; | 234 | const u32 subBus = 0; |
235 | unsigned int rirq = virt_irq_to_real_map[irq]; | 235 | unsigned int rirq = virt_irq_to_real_map[irq]; |
236 | 236 | ||
237 | /* The IRQ has already been locked by the caller */ | 237 | /* The IRQ has already been locked by the caller */ |
238 | bus = REAL_IRQ_TO_BUS(rirq); | 238 | bus = REAL_IRQ_TO_BUS(rirq); |
239 | function = REAL_IRQ_TO_FUNC(rirq); | 239 | function = REAL_IRQ_TO_FUNC(rirq); |
240 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 240 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
241 | 241 | ||
242 | /* Unmask secondary INTA */ | 242 | /* Unmask secondary INTA */ |
243 | mask = 0x80000000; | 243 | mask = 0x80000000; |
244 | HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask); | 244 | HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask); |
245 | } | 245 | } |
246 | 246 | ||
247 | /* This is called by iSeries_activate_IRQs */ | 247 | /* This is called by iSeries_activate_IRQs */ |
248 | static unsigned int iSeries_startup_IRQ(unsigned int irq) | 248 | static unsigned int iSeries_startup_IRQ(unsigned int irq) |
249 | { | 249 | { |
250 | u32 bus, deviceId, function, mask; | 250 | u32 bus, deviceId, function, mask; |
251 | const u32 subBus = 0; | 251 | const u32 subBus = 0; |
252 | unsigned int rirq = virt_irq_to_real_map[irq]; | 252 | unsigned int rirq = virt_irq_to_real_map[irq]; |
253 | 253 | ||
254 | bus = REAL_IRQ_TO_BUS(rirq); | 254 | bus = REAL_IRQ_TO_BUS(rirq); |
255 | function = REAL_IRQ_TO_FUNC(rirq); | 255 | function = REAL_IRQ_TO_FUNC(rirq); |
256 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 256 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
257 | 257 | ||
258 | /* Link the IRQ number to the bridge */ | 258 | /* Link the IRQ number to the bridge */ |
259 | HvCallXm_connectBusUnit(bus, subBus, deviceId, irq); | 259 | HvCallXm_connectBusUnit(bus, subBus, deviceId, irq); |
260 | 260 | ||
261 | /* Unmask bridge interrupts in the FISR */ | 261 | /* Unmask bridge interrupts in the FISR */ |
262 | mask = 0x01010000 << function; | 262 | mask = 0x01010000 << function; |
263 | HvCallPci_unmaskFisr(bus, subBus, deviceId, mask); | 263 | HvCallPci_unmaskFisr(bus, subBus, deviceId, mask); |
264 | iSeries_enable_IRQ(irq); | 264 | iSeries_enable_IRQ(irq); |
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
267 | 267 | ||
268 | /* | 268 | /* |
269 | * This is called out of iSeries_fixup to activate interrupt | 269 | * This is called out of iSeries_fixup to activate interrupt |
270 | * generation for usable slots | 270 | * generation for usable slots |
271 | */ | 271 | */ |
272 | void __init iSeries_activate_IRQs() | 272 | void __init iSeries_activate_IRQs() |
273 | { | 273 | { |
274 | int irq; | 274 | int irq; |
275 | unsigned long flags; | 275 | unsigned long flags; |
276 | 276 | ||
277 | for_each_irq (irq) { | 277 | for_each_irq (irq) { |
278 | irq_desc_t *desc = get_irq_desc(irq); | 278 | irq_desc_t *desc = get_irq_desc(irq); |
279 | 279 | ||
280 | if (desc && desc->handler && desc->handler->startup) { | 280 | if (desc && desc->handler && desc->handler->startup) { |
281 | spin_lock_irqsave(&desc->lock, flags); | 281 | spin_lock_irqsave(&desc->lock, flags); |
282 | desc->handler->startup(irq); | 282 | desc->handler->startup(irq); |
283 | spin_unlock_irqrestore(&desc->lock, flags); | 283 | spin_unlock_irqrestore(&desc->lock, flags); |
284 | } | 284 | } |
285 | } | 285 | } |
286 | } | 286 | } |
287 | 287 | ||
288 | /* this is not called anywhere currently */ | 288 | /* this is not called anywhere currently */ |
289 | static void iSeries_shutdown_IRQ(unsigned int irq) | 289 | static void iSeries_shutdown_IRQ(unsigned int irq) |
290 | { | 290 | { |
291 | u32 bus, deviceId, function, mask; | 291 | u32 bus, deviceId, function, mask; |
292 | const u32 subBus = 0; | 292 | const u32 subBus = 0; |
293 | unsigned int rirq = virt_irq_to_real_map[irq]; | 293 | unsigned int rirq = virt_irq_to_real_map[irq]; |
294 | 294 | ||
295 | /* irq should be locked by the caller */ | 295 | /* irq should be locked by the caller */ |
296 | bus = REAL_IRQ_TO_BUS(rirq); | 296 | bus = REAL_IRQ_TO_BUS(rirq); |
297 | function = REAL_IRQ_TO_FUNC(rirq); | 297 | function = REAL_IRQ_TO_FUNC(rirq); |
298 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 298 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
299 | 299 | ||
300 | /* Invalidate the IRQ number in the bridge */ | 300 | /* Invalidate the IRQ number in the bridge */ |
301 | HvCallXm_connectBusUnit(bus, subBus, deviceId, 0); | 301 | HvCallXm_connectBusUnit(bus, subBus, deviceId, 0); |
302 | 302 | ||
303 | /* Mask bridge interrupts in the FISR */ | 303 | /* Mask bridge interrupts in the FISR */ |
304 | mask = 0x01010000 << function; | 304 | mask = 0x01010000 << function; |
305 | HvCallPci_maskFisr(bus, subBus, deviceId, mask); | 305 | HvCallPci_maskFisr(bus, subBus, deviceId, mask); |
306 | } | 306 | } |
307 | 307 | ||
308 | /* | 308 | /* |
309 | * This will be called by device drivers (via disable_IRQ) | 309 | * This will be called by device drivers (via disable_IRQ) |
310 | * to disable INTA in the bridge interrupt status register. | 310 | * to disable INTA in the bridge interrupt status register. |
311 | */ | 311 | */ |
312 | static void iSeries_disable_IRQ(unsigned int irq) | 312 | static void iSeries_disable_IRQ(unsigned int irq) |
313 | { | 313 | { |
314 | u32 bus, deviceId, function, mask; | 314 | u32 bus, deviceId, function, mask; |
315 | const u32 subBus = 0; | 315 | const u32 subBus = 0; |
316 | unsigned int rirq = virt_irq_to_real_map[irq]; | 316 | unsigned int rirq = virt_irq_to_real_map[irq]; |
317 | 317 | ||
318 | /* The IRQ has already been locked by the caller */ | 318 | /* The IRQ has already been locked by the caller */ |
319 | bus = REAL_IRQ_TO_BUS(rirq); | 319 | bus = REAL_IRQ_TO_BUS(rirq); |
320 | function = REAL_IRQ_TO_FUNC(rirq); | 320 | function = REAL_IRQ_TO_FUNC(rirq); |
321 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 321 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
322 | 322 | ||
323 | /* Mask secondary INTA */ | 323 | /* Mask secondary INTA */ |
324 | mask = 0x80000000; | 324 | mask = 0x80000000; |
325 | HvCallPci_maskInterrupts(bus, subBus, deviceId, mask); | 325 | HvCallPci_maskInterrupts(bus, subBus, deviceId, mask); |
326 | } | 326 | } |
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Need to define this so ppc_irq_dispatch_handler will NOT call | 329 | * This does nothing because there is not enough information |
330 | * enable_IRQ at the end of interrupt handling. However, this does | 330 | * provided to do the EOI HvCall. This is done by XmPciLpEvent.c |
331 | * nothing because there is not enough information provided to do | ||
332 | * the EOI HvCall. This is done by XmPciLpEvent.c | ||
333 | */ | 331 | */ |
334 | static void iSeries_end_IRQ(unsigned int irq) | 332 | static void iSeries_end_IRQ(unsigned int irq) |
335 | { | 333 | { |
336 | } | 334 | } |
337 | 335 | ||
338 | static hw_irq_controller iSeries_IRQ_handler = { | 336 | static hw_irq_controller iSeries_IRQ_handler = { |
339 | .typename = "iSeries irq controller", | 337 | .typename = "iSeries irq controller", |
340 | .startup = iSeries_startup_IRQ, | 338 | .startup = iSeries_startup_IRQ, |
341 | .shutdown = iSeries_shutdown_IRQ, | 339 | .shutdown = iSeries_shutdown_IRQ, |
342 | .enable = iSeries_enable_IRQ, | 340 | .enable = iSeries_enable_IRQ, |
343 | .disable = iSeries_disable_IRQ, | 341 | .disable = iSeries_disable_IRQ, |
344 | .end = iSeries_end_IRQ | 342 | .end = iSeries_end_IRQ |
345 | }; | 343 | }; |
346 | 344 | ||
347 | /* | 345 | /* |
348 | * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot | 346 | * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot |
349 | * It calculates the irq value for the slot. | 347 | * It calculates the irq value for the slot. |
350 | * Note that subBusNumber is always 0 (at the moment at least). | 348 | * Note that subBusNumber is always 0 (at the moment at least). |
351 | */ | 349 | */ |
352 | int __init iSeries_allocate_IRQ(HvBusNumber busNumber, | 350 | int __init iSeries_allocate_IRQ(HvBusNumber busNumber, |
353 | HvSubBusNumber subBusNumber, HvAgentId deviceId) | 351 | HvSubBusNumber subBusNumber, HvAgentId deviceId) |
354 | { | 352 | { |
355 | unsigned int realirq, virtirq; | 353 | unsigned int realirq, virtirq; |
356 | u8 idsel = (deviceId >> 4); | 354 | u8 idsel = (deviceId >> 4); |
357 | u8 function = deviceId & 7; | 355 | u8 function = deviceId & 7; |
358 | 356 | ||
359 | virtirq = next_virtual_irq++; | 357 | virtirq = next_virtual_irq++; |
360 | realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function; | 358 | realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function; |
361 | virt_irq_to_real_map[virtirq] = realirq; | 359 | virt_irq_to_real_map[virtirq] = realirq; |
362 | 360 | ||
363 | irq_desc[virtirq].handler = &iSeries_IRQ_handler; | 361 | irq_desc[virtirq].handler = &iSeries_IRQ_handler; |
364 | return virtirq; | 362 | return virtirq; |
365 | } | 363 | } |
366 | 364 | ||
367 | int virt_irq_create_mapping(unsigned int real_irq) | 365 | int virt_irq_create_mapping(unsigned int real_irq) |
368 | { | 366 | { |
369 | BUG(); /* Don't call this on iSeries, yet */ | 367 | BUG(); /* Don't call this on iSeries, yet */ |
370 | 368 | ||
371 | return 0; | 369 | return 0; |
372 | } | 370 | } |
373 | 371 | ||
374 | void virt_irq_init(void) | 372 | void virt_irq_init(void) |
375 | { | 373 | { |
376 | return; | 374 | return; |
377 | } | 375 | } |
378 | 376 |
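The hunk above in intReceived() is the heart of this commit on the iSeries side: the slot-interrupt path now dispatches through the generic __do_IRQ() (via the call___do_IRQ asm helper when CONFIG_IRQSTACKS routes handlers onto a per-CPU irq stack) and then issues the EOI HvCall itself, since iSeries_end_IRQ() deliberately does nothing. Below is a minimal sketch of that dispatch pattern, condensed from the new code in the hunk; the wrapper name dispatch_irq is ours for illustration only, while every other symbol appears verbatim in the diff.

	/* Run the generic IRQ dispatcher, switching to the per-CPU irq
	 * stack when one is configured.  Condensed from intReceived()
	 * in the hunk above; illustrative, not a drop-in replacement. */
	static void dispatch_irq(int irq, struct pt_regs *regs)
	{
	#ifdef CONFIG_IRQSTACKS
		struct thread_info *curtp = current_thread_info();
		struct thread_info *irqtp = hardirq_ctx[smp_processor_id()];

		if (curtp != irqtp) {	/* not already on the irq stack */
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			/* asm helper: switch stacks, run __do_IRQ(irq, regs) */
			call___do_IRQ(irq, regs, irqtp);
			irqtp->task = NULL;
			/* carry work flags raised on the irq stack back over */
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
			return;
		}
	#endif
		__do_IRQ(irq, regs);	/* generic kernel IRQ dispatch */
	}

After __do_IRQ() returns, the caller still performs the EOI via HvCallPci_eoi(), because the controller's end() hook lacks the bus/subBus/deviceId information needed for that HvCall.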
arch/powerpc/platforms/pseries/xics.c
1 | /* | 1 | /* |
2 | * arch/powerpc/platforms/pseries/xics.c | 2 | * arch/powerpc/platforms/pseries/xics.c |
3 | * | 3 | * |
4 | * Copyright 2000 IBM Corporation. | 4 | * Copyright 2000 IBM Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | #include <linux/config.h> | 11 | #include <linux/config.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/signal.h> | 18 | #include <linux/signal.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
21 | #include <linux/radix-tree.h> | 21 | #include <linux/radix-tree.h> |
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <asm/prom.h> | 23 | #include <asm/prom.h> |
24 | #include <asm/io.h> | 24 | #include <asm/io.h> |
25 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
26 | #include <asm/smp.h> | 26 | #include <asm/smp.h> |
27 | #include <asm/rtas.h> | 27 | #include <asm/rtas.h> |
28 | #include <asm/hvcall.h> | 28 | #include <asm/hvcall.h> |
29 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
30 | #include <asm/i8259.h> | 30 | #include <asm/i8259.h> |
31 | 31 | ||
32 | #include "xics.h" | 32 | #include "xics.h" |
33 | 33 | ||
34 | static unsigned int xics_startup(unsigned int irq); | 34 | static unsigned int xics_startup(unsigned int irq); |
35 | static void xics_enable_irq(unsigned int irq); | 35 | static void xics_enable_irq(unsigned int irq); |
36 | static void xics_disable_irq(unsigned int irq); | 36 | static void xics_disable_irq(unsigned int irq); |
37 | static void xics_mask_and_ack_irq(unsigned int irq); | 37 | static void xics_mask_and_ack_irq(unsigned int irq); |
38 | static void xics_end_irq(unsigned int irq); | 38 | static void xics_end_irq(unsigned int irq); |
39 | static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask); | 39 | static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask); |
40 | 40 | ||
41 | static struct hw_interrupt_type xics_pic = { | 41 | static struct hw_interrupt_type xics_pic = { |
42 | .typename = " XICS ", | 42 | .typename = " XICS ", |
43 | .startup = xics_startup, | 43 | .startup = xics_startup, |
44 | .enable = xics_enable_irq, | 44 | .enable = xics_enable_irq, |
45 | .disable = xics_disable_irq, | 45 | .disable = xics_disable_irq, |
46 | .ack = xics_mask_and_ack_irq, | 46 | .ack = xics_mask_and_ack_irq, |
47 | .end = xics_end_irq, | 47 | .end = xics_end_irq, |
48 | .set_affinity = xics_set_affinity | 48 | .set_affinity = xics_set_affinity |
49 | }; | 49 | }; |
50 | 50 | ||
51 | static struct hw_interrupt_type xics_8259_pic = { | 51 | static struct hw_interrupt_type xics_8259_pic = { |
52 | .typename = " XICS/8259", | 52 | .typename = " XICS/8259", |
53 | .ack = xics_mask_and_ack_irq, | 53 | .ack = xics_mask_and_ack_irq, |
54 | }; | 54 | }; |
55 | 55 | ||
56 | /* This is used to map real irq numbers to virtual */ | 56 | /* This is used to map real irq numbers to virtual */ |
57 | static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC); | 57 | static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC); |
58 | 58 | ||
59 | #define XICS_IPI 2 | 59 | #define XICS_IPI 2 |
60 | #define XICS_IRQ_SPURIOUS 0 | 60 | #define XICS_IRQ_SPURIOUS 0 |
61 | 61 | ||
62 | /* Want a priority other than 0. Various HW issues require this. */ | 62 | /* Want a priority other than 0. Various HW issues require this. */ |
63 | #define DEFAULT_PRIORITY 5 | 63 | #define DEFAULT_PRIORITY 5 |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Mark IPIs as higher priority so we can take them inside interrupts that | 66 | * Mark IPIs as higher priority so we can take them inside interrupts that |
67 | * aren't marked SA_INTERRUPT | 67 | * aren't marked SA_INTERRUPT |
68 | */ | 68 | */ |
69 | #define IPI_PRIORITY 4 | 69 | #define IPI_PRIORITY 4 |
70 | 70 | ||
71 | struct xics_ipl { | 71 | struct xics_ipl { |
72 | union { | 72 | union { |
73 | u32 word; | 73 | u32 word; |
74 | u8 bytes[4]; | 74 | u8 bytes[4]; |
75 | } xirr_poll; | 75 | } xirr_poll; |
76 | union { | 76 | union { |
77 | u32 word; | 77 | u32 word; |
78 | u8 bytes[4]; | 78 | u8 bytes[4]; |
79 | } xirr; | 79 | } xirr; |
80 | u32 dummy; | 80 | u32 dummy; |
81 | union { | 81 | union { |
82 | u32 word; | 82 | u32 word; |
83 | u8 bytes[4]; | 83 | u8 bytes[4]; |
84 | } qirr; | 84 | } qirr; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | 87 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; |
88 | 88 | ||
89 | static int xics_irq_8259_cascade = 0; | 89 | static int xics_irq_8259_cascade = 0; |
90 | static int xics_irq_8259_cascade_real = 0; | 90 | static int xics_irq_8259_cascade_real = 0; |
91 | static unsigned int default_server = 0xFF; | 91 | static unsigned int default_server = 0xFF; |
92 | static unsigned int default_distrib_server = 0; | 92 | static unsigned int default_distrib_server = 0; |
93 | static unsigned int interrupt_server_size = 8; | 93 | static unsigned int interrupt_server_size = 8; |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * XICS only has a single IPI, so encode the messages per CPU | 96 | * XICS only has a single IPI, so encode the messages per CPU |
97 | */ | 97 | */ |
98 | struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; | 98 | struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; |
99 | 99 | ||
100 | /* RTAS service tokens */ | 100 | /* RTAS service tokens */ |
101 | static int ibm_get_xive; | 101 | static int ibm_get_xive; |
102 | static int ibm_set_xive; | 102 | static int ibm_set_xive; |
103 | static int ibm_int_on; | 103 | static int ibm_int_on; |
104 | static int ibm_int_off; | 104 | static int ibm_int_off; |
105 | 105 | ||
106 | typedef struct { | 106 | typedef struct { |
107 | int (*xirr_info_get)(int cpu); | 107 | int (*xirr_info_get)(int cpu); |
108 | void (*xirr_info_set)(int cpu, int val); | 108 | void (*xirr_info_set)(int cpu, int val); |
109 | void (*cppr_info)(int cpu, u8 val); | 109 | void (*cppr_info)(int cpu, u8 val); |
110 | void (*qirr_info)(int cpu, u8 val); | 110 | void (*qirr_info)(int cpu, u8 val); |
111 | } xics_ops; | 111 | } xics_ops; |
112 | 112 | ||
113 | 113 | ||
114 | /* SMP */ | 114 | /* SMP */ |
115 | 115 | ||
116 | static int pSeries_xirr_info_get(int n_cpu) | 116 | static int pSeries_xirr_info_get(int n_cpu) |
117 | { | 117 | { |
118 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); | 118 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); |
119 | } | 119 | } |
120 | 120 | ||
121 | static void pSeries_xirr_info_set(int n_cpu, int value) | 121 | static void pSeries_xirr_info_set(int n_cpu, int value) |
122 | { | 122 | { |
123 | out_be32(&xics_per_cpu[n_cpu]->xirr.word, value); | 123 | out_be32(&xics_per_cpu[n_cpu]->xirr.word, value); |
124 | } | 124 | } |
125 | 125 | ||
126 | static void pSeries_cppr_info(int n_cpu, u8 value) | 126 | static void pSeries_cppr_info(int n_cpu, u8 value) |
127 | { | 127 | { |
128 | out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value); | 128 | out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value); |
129 | } | 129 | } |
130 | 130 | ||
131 | static void pSeries_qirr_info(int n_cpu, u8 value) | 131 | static void pSeries_qirr_info(int n_cpu, u8 value) |
132 | { | 132 | { |
133 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); | 133 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); |
134 | } | 134 | } |
135 | 135 | ||
136 | static xics_ops pSeries_ops = { | 136 | static xics_ops pSeries_ops = { |
137 | pSeries_xirr_info_get, | 137 | pSeries_xirr_info_get, |
138 | pSeries_xirr_info_set, | 138 | pSeries_xirr_info_set, |
139 | pSeries_cppr_info, | 139 | pSeries_cppr_info, |
140 | pSeries_qirr_info | 140 | pSeries_qirr_info |
141 | }; | 141 | }; |
142 | 142 | ||
143 | static xics_ops *ops = &pSeries_ops; | 143 | static xics_ops *ops = &pSeries_ops; |
144 | 144 | ||
145 | 145 | ||
146 | /* LPAR */ | 146 | /* LPAR */ |
147 | 147 | ||
148 | static inline long plpar_eoi(unsigned long xirr) | 148 | static inline long plpar_eoi(unsigned long xirr) |
149 | { | 149 | { |
150 | return plpar_hcall_norets(H_EOI, xirr); | 150 | return plpar_hcall_norets(H_EOI, xirr); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline long plpar_cppr(unsigned long cppr) | 153 | static inline long plpar_cppr(unsigned long cppr) |
154 | { | 154 | { |
155 | return plpar_hcall_norets(H_CPPR, cppr); | 155 | return plpar_hcall_norets(H_CPPR, cppr); |
156 | } | 156 | } |
157 | 157 | ||
158 | static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) | 158 | static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) |
159 | { | 159 | { |
160 | return plpar_hcall_norets(H_IPI, servernum, mfrr); | 160 | return plpar_hcall_norets(H_IPI, servernum, mfrr); |
161 | } | 161 | } |
162 | 162 | ||
163 | static inline long plpar_xirr(unsigned long *xirr_ret) | 163 | static inline long plpar_xirr(unsigned long *xirr_ret) |
164 | { | 164 | { |
165 | unsigned long dummy; | 165 | unsigned long dummy; |
166 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); | 166 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); |
167 | } | 167 | } |
168 | 168 | ||
169 | static int pSeriesLP_xirr_info_get(int n_cpu) | 169 | static int pSeriesLP_xirr_info_get(int n_cpu) |
170 | { | 170 | { |
171 | unsigned long lpar_rc; | 171 | unsigned long lpar_rc; |
172 | unsigned long return_value; | 172 | unsigned long return_value; |
173 | 173 | ||
174 | lpar_rc = plpar_xirr(&return_value); | 174 | lpar_rc = plpar_xirr(&return_value); |
175 | if (lpar_rc != H_Success) | 175 | if (lpar_rc != H_Success) |
176 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); | 176 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); |
177 | return (int)return_value; | 177 | return (int)return_value; |
178 | } | 178 | } |
179 | 179 | ||
180 | static void pSeriesLP_xirr_info_set(int n_cpu, int value) | 180 | static void pSeriesLP_xirr_info_set(int n_cpu, int value) |
181 | { | 181 | { |
182 | unsigned long lpar_rc; | 182 | unsigned long lpar_rc; |
183 | unsigned long val64 = value & 0xffffffff; | 183 | unsigned long val64 = value & 0xffffffff; |
184 | 184 | ||
185 | lpar_rc = plpar_eoi(val64); | 185 | lpar_rc = plpar_eoi(val64); |
186 | if (lpar_rc != H_Success) | 186 | if (lpar_rc != H_Success) |
187 | panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc, | 187 | panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc, |
188 | val64); | 188 | val64); |
189 | } | 189 | } |
190 | 190 | ||
191 | void pSeriesLP_cppr_info(int n_cpu, u8 value) | 191 | void pSeriesLP_cppr_info(int n_cpu, u8 value) |
192 | { | 192 | { |
193 | unsigned long lpar_rc; | 193 | unsigned long lpar_rc; |
194 | 194 | ||
195 | lpar_rc = plpar_cppr(value); | 195 | lpar_rc = plpar_cppr(value); |
196 | if (lpar_rc != H_Success) | 196 | if (lpar_rc != H_Success) |
197 | panic("bad return code cppr - rc = %lx\n", lpar_rc); | 197 | panic("bad return code cppr - rc = %lx\n", lpar_rc); |
198 | } | 198 | } |
199 | 199 | ||
200 | static void pSeriesLP_qirr_info(int n_cpu , u8 value) | 200 | static void pSeriesLP_qirr_info(int n_cpu , u8 value) |
201 | { | 201 | { |
202 | unsigned long lpar_rc; | 202 | unsigned long lpar_rc; |
203 | 203 | ||
204 | lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); | 204 | lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); |
205 | if (lpar_rc != H_Success) | 205 | if (lpar_rc != H_Success) |
206 | panic("bad return code qirr - rc = %lx\n", lpar_rc); | 206 | panic("bad return code qirr - rc = %lx\n", lpar_rc); |
207 | } | 207 | } |
208 | 208 | ||
209 | xics_ops pSeriesLP_ops = { | 209 | xics_ops pSeriesLP_ops = { |
210 | pSeriesLP_xirr_info_get, | 210 | pSeriesLP_xirr_info_get, |
211 | pSeriesLP_xirr_info_set, | 211 | pSeriesLP_xirr_info_set, |
212 | pSeriesLP_cppr_info, | 212 | pSeriesLP_cppr_info, |
213 | pSeriesLP_qirr_info | 213 | pSeriesLP_qirr_info |
214 | }; | 214 | }; |
215 | 215 | ||
216 | static unsigned int xics_startup(unsigned int virq) | 216 | static unsigned int xics_startup(unsigned int virq) |
217 | { | 217 | { |
218 | unsigned int irq; | 218 | unsigned int irq; |
219 | 219 | ||
220 | irq = irq_offset_down(virq); | 220 | irq = irq_offset_down(virq); |
221 | if (radix_tree_insert(&irq_map, virt_irq_to_real(irq), | 221 | if (radix_tree_insert(&irq_map, virt_irq_to_real(irq), |
222 | &virt_irq_to_real_map[irq]) == -ENOMEM) | 222 | &virt_irq_to_real_map[irq]) == -ENOMEM) |
223 | printk(KERN_CRIT "Out of memory creating real -> virtual" | 223 | printk(KERN_CRIT "Out of memory creating real -> virtual" |
224 | " IRQ mapping for irq %u (real 0x%x)\n", | 224 | " IRQ mapping for irq %u (real 0x%x)\n", |
225 | virq, virt_irq_to_real(irq)); | 225 | virq, virt_irq_to_real(irq)); |
226 | xics_enable_irq(virq); | 226 | xics_enable_irq(virq); |
227 | return 0; /* return value is ignored */ | 227 | return 0; /* return value is ignored */ |
228 | } | 228 | } |
229 | 229 | ||
230 | static unsigned int real_irq_to_virt(unsigned int real_irq) | 230 | static unsigned int real_irq_to_virt(unsigned int real_irq) |
231 | { | 231 | { |
232 | unsigned int *ptr; | 232 | unsigned int *ptr; |
233 | 233 | ||
234 | ptr = radix_tree_lookup(&irq_map, real_irq); | 234 | ptr = radix_tree_lookup(&irq_map, real_irq); |
235 | if (ptr == NULL) | 235 | if (ptr == NULL) |
236 | return NO_IRQ; | 236 | return NO_IRQ; |
237 | return ptr - virt_irq_to_real_map; | 237 | return ptr - virt_irq_to_real_map; |
238 | } | 238 | } |
239 | 239 | ||
240 | #ifdef CONFIG_SMP | 240 | #ifdef CONFIG_SMP |
241 | static int get_irq_server(unsigned int irq) | 241 | static int get_irq_server(unsigned int irq) |
242 | { | 242 | { |
243 | unsigned int server; | 243 | unsigned int server; |
244 | /* For the moment only implement delivery to all cpus or one cpu */ | 244 | /* For the moment only implement delivery to all cpus or one cpu */ |
245 | cpumask_t cpumask = irq_affinity[irq]; | 245 | cpumask_t cpumask = irq_affinity[irq]; |
246 | cpumask_t tmp = CPU_MASK_NONE; | 246 | cpumask_t tmp = CPU_MASK_NONE; |
247 | 247 | ||
248 | if (!distribute_irqs) | 248 | if (!distribute_irqs) |
249 | return default_server; | 249 | return default_server; |
250 | 250 | ||
251 | if (cpus_equal(cpumask, CPU_MASK_ALL)) { | 251 | if (cpus_equal(cpumask, CPU_MASK_ALL)) { |
252 | server = default_distrib_server; | 252 | server = default_distrib_server; |
253 | } else { | 253 | } else { |
254 | cpus_and(tmp, cpu_online_map, cpumask); | 254 | cpus_and(tmp, cpu_online_map, cpumask); |
255 | 255 | ||
256 | if (cpus_empty(tmp)) | 256 | if (cpus_empty(tmp)) |
257 | server = default_distrib_server; | 257 | server = default_distrib_server; |
258 | else | 258 | else |
259 | server = get_hard_smp_processor_id(first_cpu(tmp)); | 259 | server = get_hard_smp_processor_id(first_cpu(tmp)); |
260 | } | 260 | } |
261 | 261 | ||
262 | return server; | 262 | return server; |
263 | 263 | ||
264 | } | 264 | } |
265 | #else | 265 | #else |
266 | static int get_irq_server(unsigned int irq) | 266 | static int get_irq_server(unsigned int irq) |
267 | { | 267 | { |
268 | return default_server; | 268 | return default_server; |
269 | } | 269 | } |
270 | #endif | 270 | #endif |
271 | 271 | ||
272 | static void xics_enable_irq(unsigned int virq) | 272 | static void xics_enable_irq(unsigned int virq) |
273 | { | 273 | { |
274 | unsigned int irq; | 274 | unsigned int irq; |
275 | int call_status; | 275 | int call_status; |
276 | unsigned int server; | 276 | unsigned int server; |
277 | 277 | ||
278 | irq = virt_irq_to_real(irq_offset_down(virq)); | 278 | irq = virt_irq_to_real(irq_offset_down(virq)); |
279 | if (irq == XICS_IPI) | 279 | if (irq == XICS_IPI) |
280 | return; | 280 | return; |
281 | 281 | ||
282 | server = get_irq_server(virq); | 282 | server = get_irq_server(virq); |
283 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, | 283 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, |
284 | DEFAULT_PRIORITY); | 284 | DEFAULT_PRIORITY); |
285 | if (call_status != 0) { | 285 | if (call_status != 0) { |
286 | printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive " | 286 | printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive " |
287 | "returned %d\n", irq, call_status); | 287 | "returned %d\n", irq, call_status); |
288 | printk("set_xive %x, server %x\n", ibm_set_xive, server); | 288 | printk("set_xive %x, server %x\n", ibm_set_xive, server); |
289 | return; | 289 | return; |
290 | } | 290 | } |
291 | 291 | ||
292 | /* Now unmask the interrupt (often a no-op) */ | 292 | /* Now unmask the interrupt (often a no-op) */ |
293 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); | 293 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); |
294 | if (call_status != 0) { | 294 | if (call_status != 0) { |
295 | printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on " | 295 | printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on " |
296 | "returned %d\n", irq, call_status); | 296 | "returned %d\n", irq, call_status); |
297 | return; | 297 | return; |
298 | } | 298 | } |
299 | } | 299 | } |
300 | 300 | ||
301 | static void xics_disable_real_irq(unsigned int irq) | 301 | static void xics_disable_real_irq(unsigned int irq) |
302 | { | 302 | { |
303 | int call_status; | 303 | int call_status; |
304 | unsigned int server; | 304 | unsigned int server; |
305 | 305 | ||
306 | if (irq == XICS_IPI) | 306 | if (irq == XICS_IPI) |
307 | return; | 307 | return; |
308 | 308 | ||
309 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq); | 309 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq); |
310 | if (call_status != 0) { | 310 | if (call_status != 0) { |
311 | printk(KERN_ERR "xics_disable_real_irq: irq=%u: " | 311 | printk(KERN_ERR "xics_disable_real_irq: irq=%u: " |
312 | "ibm_int_off returned %d\n", irq, call_status); | 312 | "ibm_int_off returned %d\n", irq, call_status); |
313 | return; | 313 | return; |
314 | } | 314 | } |
315 | 315 | ||
316 | server = get_irq_server(irq); | 316 | server = get_irq_server(irq); |
317 | /* Have to set XIVE to 0xff to be able to remove a slot */ | 317 | /* Have to set XIVE to 0xff to be able to remove a slot */ |
318 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff); | 318 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff); |
319 | if (call_status != 0) { | 319 | if (call_status != 0) { |
320 | printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)" | 320 | printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)" |
321 | " returned %d\n", irq, call_status); | 321 | " returned %d\n", irq, call_status); |
322 | return; | 322 | return; |
323 | } | 323 | } |
324 | } | 324 | } |
325 | 325 | ||
326 | static void xics_disable_irq(unsigned int virq) | 326 | static void xics_disable_irq(unsigned int virq) |
327 | { | 327 | { |
328 | unsigned int irq; | 328 | unsigned int irq; |
329 | 329 | ||
330 | irq = virt_irq_to_real(irq_offset_down(virq)); | 330 | irq = virt_irq_to_real(irq_offset_down(virq)); |
331 | xics_disable_real_irq(irq); | 331 | xics_disable_real_irq(irq); |
332 | } | 332 | } |
333 | 333 | ||
334 | static void xics_end_irq(unsigned int irq) | 334 | static void xics_end_irq(unsigned int irq) |
335 | { | 335 | { |
336 | int cpu = smp_processor_id(); | 336 | int cpu = smp_processor_id(); |
337 | 337 | ||
338 | iosync(); | 338 | iosync(); |
339 | ops->xirr_info_set(cpu, ((0xff << 24) | | 339 | ops->xirr_info_set(cpu, ((0xff << 24) | |
340 | (virt_irq_to_real(irq_offset_down(irq))))); | 340 | (virt_irq_to_real(irq_offset_down(irq))))); |
341 | 341 | ||
342 | } | 342 | } |
343 | 343 | ||
344 | static void xics_mask_and_ack_irq(unsigned int irq) | 344 | static void xics_mask_and_ack_irq(unsigned int irq) |
345 | { | 345 | { |
346 | int cpu = smp_processor_id(); | 346 | int cpu = smp_processor_id(); |
347 | 347 | ||
348 | if (irq < irq_offset_value()) { | 348 | if (irq < irq_offset_value()) { |
349 | i8259_pic.ack(irq); | 349 | i8259_pic.ack(irq); |
350 | iosync(); | 350 | iosync(); |
351 | ops->xirr_info_set(cpu, ((0xff<<24) | | 351 | ops->xirr_info_set(cpu, ((0xff<<24) | |
352 | xics_irq_8259_cascade_real)); | 352 | xics_irq_8259_cascade_real)); |
353 | iosync(); | 353 | iosync(); |
354 | } | 354 | } |
355 | } | 355 | } |
356 | 356 | ||
357 | int xics_get_irq(struct pt_regs *regs) | 357 | int xics_get_irq(struct pt_regs *regs) |
358 | { | 358 | { |
359 | unsigned int cpu = smp_processor_id(); | 359 | unsigned int cpu = smp_processor_id(); |
360 | unsigned int vec; | 360 | unsigned int vec; |
361 | int irq; | 361 | int irq; |
362 | 362 | ||
363 | vec = ops->xirr_info_get(cpu); | 363 | vec = ops->xirr_info_get(cpu); |
364 | /* (vec >> 24) == old priority */ | 364 | /* (vec >> 24) == old priority */ |
365 | vec &= 0x00ffffff; | 365 | vec &= 0x00ffffff; |
366 | 366 | ||
367 | /* for sanity, this had better be < NR_IRQS - 16 */ | 367 | /* for sanity, this had better be < NR_IRQS - 16 */ |
368 | if (vec == xics_irq_8259_cascade_real) { | 368 | if (vec == xics_irq_8259_cascade_real) { |
369 | irq = i8259_irq(regs); | 369 | irq = i8259_irq(regs); |
370 | if (irq == -1) { | 370 | if (irq == -1) { |
371 | /* Spurious cascaded interrupt. Still must ack xics */ | 371 | /* Spurious cascaded interrupt. Still must ack xics */ |
372 | xics_end_irq(irq_offset_up(xics_irq_8259_cascade)); | 372 | xics_end_irq(irq_offset_up(xics_irq_8259_cascade)); |
373 | 373 | ||
374 | irq = -1; | 374 | irq = -1; |
375 | } | 375 | } |
376 | } else if (vec == XICS_IRQ_SPURIOUS) { | 376 | } else if (vec == XICS_IRQ_SPURIOUS) { |
377 | irq = -1; | 377 | irq = -1; |
378 | } else { | 378 | } else { |
379 | irq = real_irq_to_virt(vec); | 379 | irq = real_irq_to_virt(vec); |
380 | if (irq == NO_IRQ) | 380 | if (irq == NO_IRQ) |
381 | irq = real_irq_to_virt_slowpath(vec); | 381 | irq = real_irq_to_virt_slowpath(vec); |
382 | if (irq == NO_IRQ) { | 382 | if (irq == NO_IRQ) { |
383 | printk(KERN_ERR "Interrupt %u (real) is invalid," | 383 | printk(KERN_ERR "Interrupt %u (real) is invalid," |
384 | " disabling it.\n", vec); | 384 | " disabling it.\n", vec); |
385 | xics_disable_real_irq(vec); | 385 | xics_disable_real_irq(vec); |
386 | } else | 386 | } else |
387 | irq = irq_offset_up(irq); | 387 | irq = irq_offset_up(irq); |
388 | } | 388 | } |
389 | return irq; | 389 | return irq; |
390 | } | 390 | } |
391 | 391 | ||
392 | #ifdef CONFIG_SMP | 392 | #ifdef CONFIG_SMP |
393 | 393 | ||
394 | irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | 394 | irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) |
395 | { | 395 | { |
396 | int cpu = smp_processor_id(); | 396 | int cpu = smp_processor_id(); |
397 | 397 | ||
398 | ops->qirr_info(cpu, 0xff); | 398 | ops->qirr_info(cpu, 0xff); |
399 | 399 | ||
400 | WARN_ON(cpu_is_offline(cpu)); | 400 | WARN_ON(cpu_is_offline(cpu)); |
401 | 401 | ||
402 | while (xics_ipi_message[cpu].value) { | 402 | while (xics_ipi_message[cpu].value) { |
403 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, | 403 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, |
404 | &xics_ipi_message[cpu].value)) { | 404 | &xics_ipi_message[cpu].value)) { |
405 | mb(); | 405 | mb(); |
406 | smp_message_recv(PPC_MSG_CALL_FUNCTION, regs); | 406 | smp_message_recv(PPC_MSG_CALL_FUNCTION, regs); |
407 | } | 407 | } |
408 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, | 408 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, |
409 | &xics_ipi_message[cpu].value)) { | 409 | &xics_ipi_message[cpu].value)) { |
410 | mb(); | 410 | mb(); |
411 | smp_message_recv(PPC_MSG_RESCHEDULE, regs); | 411 | smp_message_recv(PPC_MSG_RESCHEDULE, regs); |
412 | } | 412 | } |
413 | #if 0 | 413 | #if 0 |
414 | if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, | 414 | if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, |
415 | &xics_ipi_message[cpu].value)) { | 415 | &xics_ipi_message[cpu].value)) { |
416 | mb(); | 416 | mb(); |
417 | smp_message_recv(PPC_MSG_MIGRATE_TASK, regs); | 417 | smp_message_recv(PPC_MSG_MIGRATE_TASK, regs); |
418 | } | 418 | } |
419 | #endif | 419 | #endif |
420 | #ifdef CONFIG_DEBUGGER | 420 | #ifdef CONFIG_DEBUGGER |
421 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, | 421 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, |
422 | &xics_ipi_message[cpu].value)) { | 422 | &xics_ipi_message[cpu].value)) { |
423 | mb(); | 423 | mb(); |
424 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs); | 424 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs); |
425 | } | 425 | } |
426 | #endif | 426 | #endif |
427 | } | 427 | } |
428 | return IRQ_HANDLED; | 428 | return IRQ_HANDLED; |
429 | } | 429 | } |
430 | 430 | ||
431 | void xics_cause_IPI(int cpu) | 431 | void xics_cause_IPI(int cpu) |
432 | { | 432 | { |
433 | ops->qirr_info(cpu, IPI_PRIORITY); | 433 | ops->qirr_info(cpu, IPI_PRIORITY); |
434 | } | 434 | } |
435 | #endif /* CONFIG_SMP */ | 435 | #endif /* CONFIG_SMP */ |
436 | 436 | ||
437 | void xics_setup_cpu(void) | 437 | void xics_setup_cpu(void) |
438 | { | 438 | { |
439 | int cpu = smp_processor_id(); | 439 | int cpu = smp_processor_id(); |
440 | 440 | ||
441 | ops->cppr_info(cpu, 0xff); | 441 | ops->cppr_info(cpu, 0xff); |
442 | iosync(); | 442 | iosync(); |
443 | 443 | ||
444 | /* | 444 | /* |
445 | * Put the calling processor into the GIQ. This is really only | 445 | * Put the calling processor into the GIQ. This is really only |
446 | * necessary from a secondary thread as the OF start-cpu interface | 446 | * necessary from a secondary thread as the OF start-cpu interface |
447 | * performs this function for us on primary threads. | 447 | * performs this function for us on primary threads. |
448 | * | 448 | * |
449 | * XXX: undo of teardown on kexec needs this too, as may hotplug | 449 | * XXX: undo of teardown on kexec needs this too, as may hotplug |
450 | */ | 450 | */ |
451 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 451 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, |
452 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | 452 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); |
453 | } | 453 | } |
454 | 454 | ||
455 | void xics_init_IRQ(void) | 455 | void xics_init_IRQ(void) |
456 | { | 456 | { |
457 | int i; | 457 | int i; |
458 | unsigned long intr_size = 0; | 458 | unsigned long intr_size = 0; |
459 | struct device_node *np; | 459 | struct device_node *np; |
460 | uint *ireg, ilen, indx = 0; | 460 | uint *ireg, ilen, indx = 0; |
461 | unsigned long intr_base = 0; | 461 | unsigned long intr_base = 0; |
462 | struct xics_interrupt_node { | 462 | struct xics_interrupt_node { |
463 | unsigned long addr; | 463 | unsigned long addr; |
464 | unsigned long size; | 464 | unsigned long size; |
465 | } intnodes[NR_CPUS]; | 465 | } intnodes[NR_CPUS]; |
466 | 466 | ||
467 | ppc64_boot_msg(0x20, "XICS Init"); | 467 | ppc64_boot_msg(0x20, "XICS Init"); |
468 | 468 | ||
469 | ibm_get_xive = rtas_token("ibm,get-xive"); | 469 | ibm_get_xive = rtas_token("ibm,get-xive"); |
470 | ibm_set_xive = rtas_token("ibm,set-xive"); | 470 | ibm_set_xive = rtas_token("ibm,set-xive"); |
471 | ibm_int_on = rtas_token("ibm,int-on"); | 471 | ibm_int_on = rtas_token("ibm,int-on"); |
472 | ibm_int_off = rtas_token("ibm,int-off"); | 472 | ibm_int_off = rtas_token("ibm,int-off"); |
473 | 473 | ||
474 | np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); | 474 | np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); |
475 | if (!np) | 475 | if (!np) |
476 | panic("xics_init_IRQ: can't find interrupt presentation"); | 476 | panic("xics_init_IRQ: can't find interrupt presentation"); |
477 | 477 | ||
478 | nextnode: | 478 | nextnode: |
479 | ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL); | 479 | ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL); |
480 | if (ireg) { | 480 | if (ireg) { |
481 | /* | 481 | /* |
482 | * set node starting index for this node | 482 | * set node starting index for this node |
483 | */ | 483 | */ |
484 | indx = *ireg; | 484 | indx = *ireg; |
485 | } | 485 | } |
486 | 486 | ||
487 | ireg = (uint *)get_property(np, "reg", &ilen); | 487 | ireg = (uint *)get_property(np, "reg", &ilen); |
488 | if (!ireg) | 488 | if (!ireg) |
489 | panic("xics_init_IRQ: can't find interrupt reg property"); | 489 | panic("xics_init_IRQ: can't find interrupt reg property"); |
490 | 490 | ||
491 | while (ilen) { | 491 | while (ilen) { |
492 | intnodes[indx].addr = (unsigned long)*ireg++ << 32; | 492 | intnodes[indx].addr = (unsigned long)*ireg++ << 32; |
493 | ilen -= sizeof(uint); | 493 | ilen -= sizeof(uint); |
494 | intnodes[indx].addr |= *ireg++; | 494 | intnodes[indx].addr |= *ireg++; |
495 | ilen -= sizeof(uint); | 495 | ilen -= sizeof(uint); |
496 | intnodes[indx].size = (unsigned long)*ireg++ << 32; | 496 | intnodes[indx].size = (unsigned long)*ireg++ << 32; |
497 | ilen -= sizeof(uint); | 497 | ilen -= sizeof(uint); |
498 | intnodes[indx].size |= *ireg++; | 498 | intnodes[indx].size |= *ireg++; |
499 | ilen -= sizeof(uint); | 499 | ilen -= sizeof(uint); |
500 | indx++; | 500 | indx++; |
501 | if (indx >= NR_CPUS) break; | 501 | if (indx >= NR_CPUS) break; |
502 | } | 502 | } |
503 | 503 | ||
504 | np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation"); | 504 | np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation"); |
505 | if ((indx < NR_CPUS) && np) goto nextnode; | 505 | if ((indx < NR_CPUS) && np) goto nextnode; |
506 | 506 | ||
507 | /* Find the server numbers for the boot cpu. */ | 507 | /* Find the server numbers for the boot cpu. */ |
508 | for (np = of_find_node_by_type(NULL, "cpu"); | 508 | for (np = of_find_node_by_type(NULL, "cpu"); |
509 | np; | 509 | np; |
510 | np = of_find_node_by_type(np, "cpu")) { | 510 | np = of_find_node_by_type(np, "cpu")) { |
511 | ireg = (uint *)get_property(np, "reg", &ilen); | 511 | ireg = (uint *)get_property(np, "reg", &ilen); |
512 | if (ireg && ireg[0] == boot_cpuid_phys) { | 512 | if (ireg && ireg[0] == boot_cpuid_phys) { |
513 | ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", | 513 | ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", |
514 | &ilen); | 514 | &ilen); |
515 | i = ilen / sizeof(int); | 515 | i = ilen / sizeof(int); |
516 | if (ireg && i > 0) { | 516 | if (ireg && i > 0) { |
517 | default_server = ireg[0]; | 517 | default_server = ireg[0]; |
518 | default_distrib_server = ireg[i-1]; /* take last element */ | 518 | default_distrib_server = ireg[i-1]; /* take last element */ |
519 | } | 519 | } |
520 | ireg = (uint *)get_property(np, | 520 | ireg = (uint *)get_property(np, |
521 | "ibm,interrupt-server#-size", NULL); | 521 | "ibm,interrupt-server#-size", NULL); |
522 | if (ireg) | 522 | if (ireg) |
523 | interrupt_server_size = *ireg; | 523 | interrupt_server_size = *ireg; |
524 | break; | 524 | break; |
525 | } | 525 | } |
526 | } | 526 | } |
527 | of_node_put(np); | 527 | of_node_put(np); |
528 | 528 | ||
529 | intr_base = intnodes[0].addr; | 529 | intr_base = intnodes[0].addr; |
530 | intr_size = intnodes[0].size; | 530 | intr_size = intnodes[0].size; |
531 | 531 | ||
532 | np = of_find_node_by_type(NULL, "interrupt-controller"); | 532 | np = of_find_node_by_type(NULL, "interrupt-controller"); |
533 | if (!np) { | 533 | if (!np) { |
534 | printk(KERN_WARNING "xics: no ISA interrupt controller\n"); | 534 | printk(KERN_WARNING "xics: no ISA interrupt controller\n"); |
535 | xics_irq_8259_cascade_real = -1; | 535 | xics_irq_8259_cascade_real = -1; |
536 | xics_irq_8259_cascade = -1; | 536 | xics_irq_8259_cascade = -1; |
537 | } else { | 537 | } else { |
538 | ireg = (uint *) get_property(np, "interrupts", NULL); | 538 | ireg = (uint *) get_property(np, "interrupts", NULL); |
539 | if (!ireg) | 539 | if (!ireg) |
540 | panic("xics_init_IRQ: can't find ISA interrupts property"); | 540 | panic("xics_init_IRQ: can't find ISA interrupts property"); |
541 | 541 | ||
542 | xics_irq_8259_cascade_real = *ireg; | 542 | xics_irq_8259_cascade_real = *ireg; |
543 | xics_irq_8259_cascade | 543 | xics_irq_8259_cascade |
544 | = virt_irq_create_mapping(xics_irq_8259_cascade_real); | 544 | = virt_irq_create_mapping(xics_irq_8259_cascade_real); |
545 | of_node_put(np); | 545 | of_node_put(np); |
546 | } | 546 | } |
547 | 547 | ||
548 | if (systemcfg->platform == PLATFORM_PSERIES) { | 548 | if (systemcfg->platform == PLATFORM_PSERIES) { |
549 | #ifdef CONFIG_SMP | 549 | #ifdef CONFIG_SMP |
550 | for_each_cpu(i) { | 550 | for_each_cpu(i) { |
551 | int hard_id; | 551 | int hard_id; |
552 | 552 | ||
553 | /* FIXME: Do this dynamically! --RR */ | 553 | /* FIXME: Do this dynamically! --RR */ |
554 | if (!cpu_present(i)) | 554 | if (!cpu_present(i)) |
555 | continue; | 555 | continue; |
556 | 556 | ||
557 | hard_id = get_hard_smp_processor_id(i); | 557 | hard_id = get_hard_smp_processor_id(i); |
558 | xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, | 558 | xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, |
559 | intnodes[hard_id].size); | 559 | intnodes[hard_id].size); |
560 | } | 560 | } |
561 | #else | 561 | #else |
562 | xics_per_cpu[0] = ioremap(intr_base, intr_size); | 562 | xics_per_cpu[0] = ioremap(intr_base, intr_size); |
563 | #endif /* CONFIG_SMP */ | 563 | #endif /* CONFIG_SMP */ |
564 | } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) { | 564 | } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) { |
565 | ops = &pSeriesLP_ops; | 565 | ops = &pSeriesLP_ops; |
566 | } | 566 | } |
567 | 567 | ||
568 | xics_8259_pic.enable = i8259_pic.enable; | 568 | xics_8259_pic.enable = i8259_pic.enable; |
569 | xics_8259_pic.disable = i8259_pic.disable; | 569 | xics_8259_pic.disable = i8259_pic.disable; |
| | 570 | xics_8259_pic.end = i8259_pic.end; |
570 | for (i = 0; i < 16; ++i) | 571 | for (i = 0; i < 16; ++i) |
571 | get_irq_desc(i)->handler = &xics_8259_pic; | 572 | get_irq_desc(i)->handler = &xics_8259_pic; |
572 | for (; i < NR_IRQS; ++i) | 573 | for (; i < NR_IRQS; ++i) |
573 | get_irq_desc(i)->handler = &xics_pic; | 574 | get_irq_desc(i)->handler = &xics_pic; |
574 | 575 | ||
575 | xics_setup_cpu(); | 576 | xics_setup_cpu(); |
576 | 577 | ||
577 | ppc64_boot_msg(0x21, "XICS Done"); | 578 | ppc64_boot_msg(0x21, "XICS Done"); |
578 | } | 579 | } |
579 | 580 | ||
580 | /* | 581 | /* |
581 | * We can't do this in init_IRQ because we need the memory subsystem up for | 582 | * We can't do this in init_IRQ because we need the memory subsystem up for |
582 | * request_irq() | 583 | * request_irq() |
583 | */ | 584 | */ |
584 | static int __init xics_setup_i8259(void) | 585 | static int __init xics_setup_i8259(void) |
585 | { | 586 | { |
586 | if (ppc64_interrupt_controller == IC_PPC_XIC && | 587 | if (ppc64_interrupt_controller == IC_PPC_XIC && |
587 | xics_irq_8259_cascade != -1) { | 588 | xics_irq_8259_cascade != -1) { |
588 | if (request_irq(irq_offset_up(xics_irq_8259_cascade), | 589 | if (request_irq(irq_offset_up(xics_irq_8259_cascade), |
589 | no_action, 0, "8259 cascade", NULL)) | 590 | no_action, 0, "8259 cascade", NULL)) |
590 | printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 " | 591 | printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 " |
591 | "cascade\n"); | 592 | "cascade\n"); |
592 | i8259_init(0, 0); | 593 | i8259_init(0, 0); |
593 | } | 594 | } |
594 | return 0; | 595 | return 0; |
595 | } | 596 | } |
596 | arch_initcall(xics_setup_i8259); | 597 | arch_initcall(xics_setup_i8259); |
597 | 598 | ||
598 | #ifdef CONFIG_SMP | 599 | #ifdef CONFIG_SMP |
599 | void xics_request_IPIs(void) | 600 | void xics_request_IPIs(void) |
600 | { | 601 | { |
601 | virt_irq_to_real_map[XICS_IPI] = XICS_IPI; | 602 | virt_irq_to_real_map[XICS_IPI] = XICS_IPI; |
602 | 603 | ||
603 | /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ | 604 | /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ |
604 | request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT, | 605 | request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT, |
605 | "IPI", NULL); | 606 | "IPI", NULL); |
606 | get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU; | 607 | get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU; |
607 | } | 608 | } |
608 | #endif | 609 | #endif |
609 | 610 | ||
610 | static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | 611 | static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) |
611 | { | 612 | { |
612 | unsigned int irq; | 613 | unsigned int irq; |
613 | int status; | 614 | int status; |
614 | int xics_status[2]; | 615 | int xics_status[2]; |
615 | unsigned long newmask; | 616 | unsigned long newmask; |
616 | cpumask_t tmp = CPU_MASK_NONE; | 617 | cpumask_t tmp = CPU_MASK_NONE; |
617 | 618 | ||
618 | irq = virt_irq_to_real(irq_offset_down(virq)); | 619 | irq = virt_irq_to_real(irq_offset_down(virq)); |
619 | if (irq == XICS_IPI || irq == NO_IRQ) | 620 | if (irq == XICS_IPI || irq == NO_IRQ) |
620 | return; | 621 | return; |
621 | 622 | ||
622 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | 623 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); |
623 | 624 | ||
624 | if (status) { | 625 | if (status) { |
625 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive " | 626 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive " |
626 | "returns %d\n", irq, status); | 627 | "returns %d\n", irq, status); |
627 | return; | 628 | return; |
628 | } | 629 | } |
629 | 630 | ||
630 | /* For the moment only implement delivery to all cpus or one cpu */ | 631 | /* For the moment only implement delivery to all cpus or one cpu */ |
631 | if (cpus_equal(cpumask, CPU_MASK_ALL)) { | 632 | if (cpus_equal(cpumask, CPU_MASK_ALL)) { |
632 | newmask = default_distrib_server; | 633 | newmask = default_distrib_server; |
633 | } else { | 634 | } else { |
634 | cpus_and(tmp, cpu_online_map, cpumask); | 635 | cpus_and(tmp, cpu_online_map, cpumask); |
635 | if (cpus_empty(tmp)) | 636 | if (cpus_empty(tmp)) |
636 | return; | 637 | return; |
637 | newmask = get_hard_smp_processor_id(first_cpu(tmp)); | 638 | newmask = get_hard_smp_processor_id(first_cpu(tmp)); |
638 | } | 639 | } |
639 | 640 | ||
640 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | 641 | status = rtas_call(ibm_set_xive, 3, 1, NULL, |
641 | irq, newmask, xics_status[1]); | 642 | irq, newmask, xics_status[1]); |
642 | 643 | ||
643 | if (status) { | 644 | if (status) { |
644 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive " | 645 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive " |
645 | "returns %d\n", irq, status); | 646 | "returns %d\n", irq, status); |
646 | return; | 647 | return; |
647 | } | 648 | } |
648 | } | 649 | } |
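
The mask handling above boils down to one decision: hand the interrupt to the hardware's global distribution server, or pin it to a single hard processor id. A standalone sketch of that choice, reusing the kernel names from the function above (illustrative only; the real function additionally reprograms the XIVE via the RTAS calls shown):

	/* Pick the XICS delivery target for a requested affinity mask.
	 * Mirrors xics_set_affinity() above: CPU_MASK_ALL selects the
	 * distribution server, anything else one online CPU's hard id.
	 */
	static int pick_xics_server(cpumask_t requested, unsigned long *server)
	{
		cpumask_t tmp;

		if (cpus_equal(requested, CPU_MASK_ALL)) {
			*server = default_distrib_server;
			return 0;
		}
		cpus_and(tmp, cpu_online_map, requested);
		if (cpus_empty(tmp))
			return -EINVAL;	/* no requested CPU is online */
		*server = get_hard_smp_processor_id(first_cpu(tmp));
		return 0;
	}
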
649 | 650 | ||
650 | void xics_teardown_cpu(int secondary) | 651 | void xics_teardown_cpu(int secondary) |
651 | { | 652 | { |
652 | int cpu = smp_processor_id(); | 653 | int cpu = smp_processor_id(); |
653 | 654 | ||
654 | ops->cppr_info(cpu, 0x00); | 655 | ops->cppr_info(cpu, 0x00); |
655 | iosync(); | 656 | iosync(); |
656 | 657 | ||
657 | /* | 658 | /* |
658 | * Some machines need to have at least one cpu in the GIQ, | 659 | * Some machines need to have at least one cpu in the GIQ, |
659 | * so leave the master cpu in the group. | 660 | * so leave the master cpu in the group. |
660 | */ | 661 | */ |
661 | if (secondary) { | 662 | if (secondary) { |
662 | /* | 663 | /* |
663 | * we need to EOI the IPI if we got here from a kexec down IPI | 664 | * we need to EOI the IPI if we got here from a kexec down IPI |
664 | * | 665 | * |
665 | * probably need to check all the other interrupts too | 666 | * probably need to check all the other interrupts too |
666 | * should we be flagging idle loop instead? | 667 | * should we be flagging idle loop instead? |
667 | * or creating some task to be scheduled? | 668 | * or creating some task to be scheduled? |
668 | */ | 669 | */ |
669 | ops->xirr_info_set(cpu, XICS_IPI); | 670 | ops->xirr_info_set(cpu, XICS_IPI); |
670 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 671 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, |
671 | (1UL << interrupt_server_size) - 1 - | 672 | (1UL << interrupt_server_size) - 1 - |
672 | default_distrib_server, 0); | 673 | default_distrib_server, 0); |
673 | } | 674 | } |
674 | } | 675 | } |
675 | 676 | ||
676 | #ifdef CONFIG_HOTPLUG_CPU | 677 | #ifdef CONFIG_HOTPLUG_CPU |
677 | 678 | ||
678 | /* Interrupts are disabled. */ | 679 | /* Interrupts are disabled. */ |
679 | void xics_migrate_irqs_away(void) | 680 | void xics_migrate_irqs_away(void) |
680 | { | 681 | { |
681 | int status; | 682 | int status; |
682 | unsigned int irq, virq, cpu = smp_processor_id(); | 683 | unsigned int irq, virq, cpu = smp_processor_id(); |
683 | 684 | ||
684 | /* Reject any interrupt that was queued to us... */ | 685 | /* Reject any interrupt that was queued to us... */ |
685 | ops->cppr_info(cpu, 0); | 686 | ops->cppr_info(cpu, 0); |
686 | iosync(); | 687 | iosync(); |
687 | 688 | ||
688 | /* remove ourselves from the global interrupt queue */ | 689 | /* remove ourselves from the global interrupt queue */ |
689 | status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 690 | status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, |
690 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 0); | 691 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 0); |
691 | WARN_ON(status < 0); | 692 | WARN_ON(status < 0); |
692 | 693 | ||
693 | /* Allow IPIs again... */ | 694 | /* Allow IPIs again... */ |
694 | ops->cppr_info(cpu, DEFAULT_PRIORITY); | 695 | ops->cppr_info(cpu, DEFAULT_PRIORITY); |
695 | iosync(); | 696 | iosync(); |
696 | 697 | ||
697 | for_each_irq(virq) { | 698 | for_each_irq(virq) { |
698 | irq_desc_t *desc; | 699 | irq_desc_t *desc; |
699 | int xics_status[2]; | 700 | int xics_status[2]; |
700 | unsigned long flags; | 701 | unsigned long flags; |
701 | 702 | ||
702 | /* We can't set affinity on ISA interrupts */ | 703 | /* We can't set affinity on ISA interrupts */ |
703 | if (virq < irq_offset_value()) | 704 | if (virq < irq_offset_value()) |
704 | continue; | 705 | continue; |
705 | 706 | ||
706 | desc = get_irq_desc(virq); | 707 | desc = get_irq_desc(virq); |
707 | irq = virt_irq_to_real(irq_offset_down(virq)); | 708 | irq = virt_irq_to_real(irq_offset_down(virq)); |
708 | 709 | ||
709 | /* We need to get IPIs still. */ | 710 | /* We need to get IPIs still. */ |
710 | if (irq == XICS_IPI || irq == NO_IRQ) | 711 | if (irq == XICS_IPI || irq == NO_IRQ) |
711 | continue; | 712 | continue; |
712 | 713 | ||
713 | /* We only need to migrate enabled IRQS */ | 714 | /* We only need to migrate enabled IRQS */ |
714 | if (desc == NULL || desc->handler == NULL | 715 | if (desc == NULL || desc->handler == NULL |
715 | || desc->action == NULL | 716 | || desc->action == NULL |
716 | || desc->handler->set_affinity == NULL) | 717 | || desc->handler->set_affinity == NULL) |
717 | continue; | 718 | continue; |
718 | 719 | ||
719 | spin_lock_irqsave(&desc->lock, flags); | 720 | spin_lock_irqsave(&desc->lock, flags); |
720 | 721 | ||
721 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | 722 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); |
722 | if (status) { | 723 | if (status) { |
723 | printk(KERN_ERR "migrate_irqs_away: irq=%u " | 724 | printk(KERN_ERR "migrate_irqs_away: irq=%u " |
724 | "ibm,get-xive returns %d\n", | 725 | "ibm,get-xive returns %d\n", |
725 | virq, status); | 726 | virq, status); |
726 | goto unlock; | 727 | goto unlock; |
727 | } | 728 | } |
728 | 729 | ||
729 | /* | 730 | /* |
730 | * We only support delivery to all cpus or to one cpu. | 731 | * We only support delivery to all cpus or to one cpu. |
731 | * The irq has to be migrated only in the single cpu | 732 | * The irq has to be migrated only in the single cpu |
732 | * case. | 733 | * case. |
733 | */ | 734 | */ |
734 | if (xics_status[0] != get_hard_smp_processor_id(cpu)) | 735 | if (xics_status[0] != get_hard_smp_processor_id(cpu)) |
735 | goto unlock; | 736 | goto unlock; |
736 | 737 | ||
737 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", | 738 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", |
738 | virq, cpu); | 739 | virq, cpu); |
739 | 740 | ||
740 | /* Reset affinity to all cpus */ | 741 | /* Reset affinity to all cpus */ |
741 | desc->handler->set_affinity(virq, CPU_MASK_ALL); | 742 | desc->handler->set_affinity(virq, CPU_MASK_ALL); |
742 | irq_affinity[virq] = CPU_MASK_ALL; | 743 | irq_affinity[virq] = CPU_MASK_ALL; |
743 | unlock: | 744 | unlock: |
744 | spin_unlock_irqrestore(&desc->lock, flags); | 745 | spin_unlock_irqrestore(&desc->lock, flags); |
745 | } | 746 | } |
746 | } | 747 | } |
747 | #endif | 748 | #endif |
748 | 749 |
arch/ppc64/kernel/irq.c
1 | /* | 1 | /* |
2 | * arch/ppc/kernel/irq.c | 2 | * arch/ppc/kernel/irq.c |
3 | * | 3 | * |
4 | * Derived from arch/i386/kernel/irq.c | 4 | * Derived from arch/i386/kernel/irq.c |
5 | * Copyright (C) 1992 Linus Torvalds | 5 | * Copyright (C) 1992 Linus Torvalds |
6 | * Adapted from arch/i386 by Gary Thomas | 6 | * Adapted from arch/i386 by Gary Thomas |
7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
8 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) | 8 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) |
9 | * Copyright (C) 1996 Cort Dougan | 9 | * Copyright (C) 1996 Cort Dougan |
10 | * Adapted for Power Macintosh by Paul Mackerras | 10 | * Adapted for Power Macintosh by Paul Mackerras |
11 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) | 11 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) |
12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | 12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or | 14 | * This program is free software; you can redistribute it and/or |
15 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
18 | * | 18 | * |
19 | * This file contains the code used by various IRQ handling routines: | 19 | * This file contains the code used by various IRQ handling routines: |
20 | * asking for different IRQs should be done through these routines | 20 | * asking for different IRQs should be done through these routines |
21 | * instead of just grabbing them. Thus setups with different IRQ numbers | 21 | * instead of just grabbing them. Thus setups with different IRQ numbers |
22 | * shouldn't result in any weird surprises, and installing new handlers | 22 | * shouldn't result in any weird surprises, and installing new handlers |
23 | * should be easier. | 23 | * should be easier. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/errno.h> | 26 | #include <linux/errno.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/threads.h> | 28 | #include <linux/threads.h> |
29 | #include <linux/kernel_stat.h> | 29 | #include <linux/kernel_stat.h> |
30 | #include <linux/signal.h> | 30 | #include <linux/signal.h> |
31 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
32 | #include <linux/ioport.h> | 32 | #include <linux/ioport.h> |
33 | #include <linux/interrupt.h> | 33 | #include <linux/interrupt.h> |
34 | #include <linux/timex.h> | 34 | #include <linux/timex.h> |
35 | #include <linux/config.h> | 35 | #include <linux/config.h> |
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
40 | #include <linux/irq.h> | 40 | #include <linux/irq.h> |
41 | #include <linux/proc_fs.h> | 41 | #include <linux/proc_fs.h> |
42 | #include <linux/random.h> | 42 | #include <linux/random.h> |
43 | #include <linux/kallsyms.h> | 43 | #include <linux/kallsyms.h> |
44 | #include <linux/profile.h> | 44 | #include <linux/profile.h> |
45 | #include <linux/bitops.h> | 45 | #include <linux/bitops.h> |
46 | 46 | ||
47 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
48 | #include <asm/system.h> | 48 | #include <asm/system.h> |
49 | #include <asm/io.h> | 49 | #include <asm/io.h> |
50 | #include <asm/pgtable.h> | 50 | #include <asm/pgtable.h> |
51 | #include <asm/irq.h> | 51 | #include <asm/irq.h> |
52 | #include <asm/cache.h> | 52 | #include <asm/cache.h> |
53 | #include <asm/prom.h> | 53 | #include <asm/prom.h> |
54 | #include <asm/ptrace.h> | 54 | #include <asm/ptrace.h> |
55 | #include <asm/iseries/it_lp_queue.h> | 55 | #include <asm/iseries/it_lp_queue.h> |
56 | #include <asm/machdep.h> | 56 | #include <asm/machdep.h> |
57 | #include <asm/paca.h> | 57 | #include <asm/paca.h> |
58 | 58 | ||
59 | #ifdef CONFIG_SMP | 59 | #ifdef CONFIG_SMP |
60 | extern void iSeries_smp_message_recv( struct pt_regs * ); | 60 | extern void iSeries_smp_message_recv( struct pt_regs * ); |
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | extern irq_desc_t irq_desc[NR_IRQS]; | 63 | extern irq_desc_t irq_desc[NR_IRQS]; |
64 | EXPORT_SYMBOL(irq_desc); | 64 | EXPORT_SYMBOL(irq_desc); |
65 | 65 | ||
66 | int distribute_irqs = 1; | 66 | int distribute_irqs = 1; |
67 | int __irq_offset_value; | 67 | int __irq_offset_value; |
68 | int ppc_spurious_interrupts; | 68 | int ppc_spurious_interrupts; |
69 | u64 ppc64_interrupt_controller; | 69 | u64 ppc64_interrupt_controller; |
70 | 70 | ||
71 | int show_interrupts(struct seq_file *p, void *v) | 71 | int show_interrupts(struct seq_file *p, void *v) |
72 | { | 72 | { |
73 | int i = *(loff_t *) v, j; | 73 | int i = *(loff_t *) v, j; |
74 | struct irqaction * action; | 74 | struct irqaction * action; |
75 | irq_desc_t *desc; | 75 | irq_desc_t *desc; |
76 | unsigned long flags; | 76 | unsigned long flags; |
77 | 77 | ||
78 | if (i == 0) { | 78 | if (i == 0) { |
79 | seq_printf(p, " "); | 79 | seq_printf(p, " "); |
80 | for (j=0; j<NR_CPUS; j++) { | 80 | for (j=0; j<NR_CPUS; j++) { |
81 | if (cpu_online(j)) | 81 | if (cpu_online(j)) |
82 | seq_printf(p, "CPU%d ",j); | 82 | seq_printf(p, "CPU%d ",j); |
83 | } | 83 | } |
84 | seq_putc(p, '\n'); | 84 | seq_putc(p, '\n'); |
85 | } | 85 | } |
86 | 86 | ||
87 | if (i < NR_IRQS) { | 87 | if (i < NR_IRQS) { |
88 | desc = get_irq_desc(i); | 88 | desc = get_irq_desc(i); |
89 | spin_lock_irqsave(&desc->lock, flags); | 89 | spin_lock_irqsave(&desc->lock, flags); |
90 | action = desc->action; | 90 | action = desc->action; |
91 | if (!action || !action->handler) | 91 | if (!action || !action->handler) |
92 | goto skip; | 92 | goto skip; |
93 | seq_printf(p, "%3d: ", i); | 93 | seq_printf(p, "%3d: ", i); |
94 | #ifdef CONFIG_SMP | 94 | #ifdef CONFIG_SMP |
95 | for (j = 0; j < NR_CPUS; j++) { | 95 | for (j = 0; j < NR_CPUS; j++) { |
96 | if (cpu_online(j)) | 96 | if (cpu_online(j)) |
97 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | 97 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
98 | } | 98 | } |
99 | #else | 99 | #else |
100 | seq_printf(p, "%10u ", kstat_irqs(i)); | 100 | seq_printf(p, "%10u ", kstat_irqs(i)); |
101 | #endif /* CONFIG_SMP */ | 101 | #endif /* CONFIG_SMP */ |
102 | if (desc->handler) | 102 | if (desc->handler) |
103 | seq_printf(p, " %s ", desc->handler->typename ); | 103 | seq_printf(p, " %s ", desc->handler->typename ); |
104 | else | 104 | else |
105 | seq_printf(p, " None "); | 105 | seq_printf(p, " None "); |
106 | seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); | 106 | seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); |
107 | seq_printf(p, " %s",action->name); | 107 | seq_printf(p, " %s",action->name); |
108 | for (action=action->next; action; action = action->next) | 108 | for (action=action->next; action; action = action->next) |
109 | seq_printf(p, ", %s", action->name); | 109 | seq_printf(p, ", %s", action->name); |
110 | seq_putc(p, '\n'); | 110 | seq_putc(p, '\n'); |
111 | skip: | 111 | skip: |
112 | spin_unlock_irqrestore(&desc->lock, flags); | 112 | spin_unlock_irqrestore(&desc->lock, flags); |
113 | } else if (i == NR_IRQS) | 113 | } else if (i == NR_IRQS) |
114 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); | 114 | seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); |
115 | return 0; | 115 | return 0; |
116 | } | 116 | } |
117 | 117 | ||
118 | #ifdef CONFIG_HOTPLUG_CPU | 118 | #ifdef CONFIG_HOTPLUG_CPU |
119 | void fixup_irqs(cpumask_t map) | 119 | void fixup_irqs(cpumask_t map) |
120 | { | 120 | { |
121 | unsigned int irq; | 121 | unsigned int irq; |
122 | static int warned; | 122 | static int warned; |
123 | 123 | ||
124 | for_each_irq(irq) { | 124 | for_each_irq(irq) { |
125 | cpumask_t mask; | 125 | cpumask_t mask; |
126 | 126 | ||
127 | if (irq_desc[irq].status & IRQ_PER_CPU) | 127 | if (irq_desc[irq].status & IRQ_PER_CPU) |
128 | continue; | 128 | continue; |
129 | 129 | ||
130 | cpus_and(mask, irq_affinity[irq], map); | 130 | cpus_and(mask, irq_affinity[irq], map); |
131 | if (any_online_cpu(mask) == NR_CPUS) { | 131 | if (any_online_cpu(mask) == NR_CPUS) { |
132 | printk("Breaking affinity for irq %i\n", irq); | 132 | printk("Breaking affinity for irq %i\n", irq); |
133 | mask = map; | 133 | mask = map; |
134 | } | 134 | } |
135 | if (irq_desc[irq].handler->set_affinity) | 135 | if (irq_desc[irq].handler->set_affinity) |
136 | irq_desc[irq].handler->set_affinity(irq, mask); | 136 | irq_desc[irq].handler->set_affinity(irq, mask); |
137 | else if (irq_desc[irq].action && !(warned++)) | 137 | else if (irq_desc[irq].action && !(warned++)) |
138 | printk("Cannot set affinity for irq %i\n", irq); | 138 | printk("Cannot set affinity for irq %i\n", irq); |
139 | } | 139 | } |
140 | 140 | ||
141 | local_irq_enable(); | 141 | local_irq_enable(); |
142 | mdelay(1); | 142 | mdelay(1); |
143 | local_irq_disable(); | 143 | local_irq_disable(); |
144 | } | 144 | } |
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | extern int noirqdebug; | ||
148 | |||
149 | /* | ||
150 | * Eventually, this should take an array of interrupts and an array size | ||
151 | * so it can dispatch multiple interrupts. | ||
152 | */ | ||
153 | void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq) | ||
154 | { | ||
155 | int status; | ||
156 | struct irqaction *action; | ||
157 | int cpu = smp_processor_id(); | ||
158 | irq_desc_t *desc = get_irq_desc(irq); | ||
159 | irqreturn_t action_ret; | ||
160 | |||
161 | kstat_cpu(cpu).irqs[irq]++; | ||
162 | |||
163 | if (desc->status & IRQ_PER_CPU) { | ||
164 | /* no locking required for CPU-local interrupts: */ | ||
165 | ack_irq(irq); | ||
166 | action_ret = handle_IRQ_event(irq, regs, desc->action); | ||
167 | desc->handler->end(irq); | ||
168 | return; | ||
169 | } | ||
170 | |||
171 | spin_lock(&desc->lock); | ||
172 | ack_irq(irq); | ||
173 | /* | ||
174 | REPLAY is when Linux resends an IRQ that was dropped earlier | ||
175 | WAITING is used by probe to mark irqs that are being tested | ||
176 | */ | ||
177 | status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); | ||
178 | status |= IRQ_PENDING; /* we _want_ to handle it */ | ||
179 | |||
180 | /* | ||
181 | * If the IRQ is disabled for whatever reason, we cannot | ||
182 | * use the action we have. | ||
183 | */ | ||
184 | action = NULL; | ||
185 | if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) { | ||
186 | action = desc->action; | ||
187 | if (!action || !action->handler) { | ||
188 | ppc_spurious_interrupts++; | ||
189 | printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq); | ||
190 | /* We can't call disable_irq here, it would deadlock */ | ||
191 | if (!desc->depth) | ||
192 | desc->depth = 1; | ||
193 | desc->status |= IRQ_DISABLED; | ||
194 | /* This is not a real spurious interrupt, we | ||
195 | * have to eoi it, so we jump to out | ||
196 | */ | ||
197 | mask_irq(irq); | ||
198 | goto out; | ||
199 | } | ||
200 | status &= ~IRQ_PENDING; /* we commit to handling */ | ||
201 | status |= IRQ_INPROGRESS; /* we are handling it */ | ||
202 | } | ||
203 | desc->status = status; | ||
204 | |||
205 | /* | ||
206 | * If there is no IRQ handler or it was disabled, exit early. | ||
207 | * Since we set PENDING, if another processor is handling | ||
208 | * a different instance of this same irq, the other processor | ||
209 | * will take care of it. | ||
210 | */ | ||
211 | if (unlikely(!action)) | ||
212 | goto out; | ||
213 | |||
214 | /* | ||
215 | * Edge triggered interrupts need to remember | ||
216 | * pending events. | ||
217 | * This applies to any hw interrupts that allow a second | ||
218 | * instance of the same irq to arrive while we are in do_IRQ | ||
219 | * or in the handler. But the code here only handles the _second_ | ||
220 | * instance of the irq, not the third or fourth. So it is mostly | ||
221 | * useful for irq hardware that does not mask cleanly in an | ||
222 | * SMP environment. | ||
223 | */ | ||
224 | for (;;) { | ||
225 | spin_unlock(&desc->lock); | ||
226 | |||
227 | action_ret = handle_IRQ_event(irq, regs, action); | ||
228 | |||
229 | spin_lock(&desc->lock); | ||
230 | if (!noirqdebug) | ||
231 | note_interrupt(irq, desc, action_ret, regs); | ||
232 | if (likely(!(desc->status & IRQ_PENDING))) | ||
233 | break; | ||
234 | desc->status &= ~IRQ_PENDING; | ||
235 | } | ||
236 | out: | ||
237 | desc->status &= ~IRQ_INPROGRESS; | ||
238 | /* | ||
239 | * The ->end() handler has to deal with interrupts which got | ||
240 | * disabled while the handler was running. | ||
241 | */ | ||
242 | if (desc->handler) { | ||
243 | if (desc->handler->end) | ||
244 | desc->handler->end(irq); | ||
245 | else if (desc->handler->enable) | ||
246 | desc->handler->enable(irq); | ||
247 | } | ||
248 | spin_unlock(&desc->lock); | ||
249 | } | ||
250 | |||
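
For reference, the generic path that replaces the routine deleted above is essentially the same state machine. Below is a condensed sketch of __do_IRQ() from kernel/irq/handle.c of this era, reproduced from memory and trimmed, so consult the real source. The behavioural difference called out in the commit message is at the end: desc->handler->end(irq) is invoked unconditionally, which is why every controller (xics_8259 included) must now supply an end() routine:

	/* Condensed sketch of the generic __do_IRQ() (kernel/irq/handle.c).
	 * Not the verbatim kernel code -- error paths and comments trimmed.
	 */
	fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
	{
		irq_desc_t *desc = irq_desc + irq;
		struct irqaction *action;
		unsigned int status;

		kstat_this_cpu.irqs[irq]++;
		if (desc->status & IRQ_PER_CPU) {
			/* no locking required for CPU-local interrupts */
			desc->handler->ack(irq);
			handle_IRQ_event(irq, regs, desc->action);
			desc->handler->end(irq);
			return 1;
		}

		spin_lock(&desc->lock);
		desc->handler->ack(irq);
		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
		status |= IRQ_PENDING;

		action = NULL;
		if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
			action = desc->action;
			status &= ~IRQ_PENDING;		/* we commit to handling */
			status |= IRQ_INPROGRESS;
		}
		desc->status = status;
		if (unlikely(!action))
			goto out;

		for (;;) {
			irqreturn_t action_ret;

			spin_unlock(&desc->lock);
			action_ret = handle_IRQ_event(irq, regs, action);
			spin_lock(&desc->lock);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret, regs);
			if (likely(!(desc->status & IRQ_PENDING)))
				break;
			desc->status &= ~IRQ_PENDING;
		}
		desc->status &= ~IRQ_INPROGRESS;
	out:
		/* Unconditional: the controller must provide end(). */
		desc->handler->end(irq);
		spin_unlock(&desc->lock);
		return 1;
	}
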
251 | #ifdef CONFIG_PPC_ISERIES | 147 | #ifdef CONFIG_PPC_ISERIES |
252 | void do_IRQ(struct pt_regs *regs) | 148 | void do_IRQ(struct pt_regs *regs) |
253 | { | 149 | { |
254 | struct paca_struct *lpaca; | 150 | struct paca_struct *lpaca; |
255 | 151 | ||
256 | irq_enter(); | 152 | irq_enter(); |
257 | 153 | ||
258 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 154 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
259 | /* Debugging check for stack overflow: is there less than 2KB free? */ | 155 | /* Debugging check for stack overflow: is there less than 2KB free? */ |
260 | { | 156 | { |
261 | long sp; | 157 | long sp; |
262 | 158 | ||
263 | sp = __get_SP() & (THREAD_SIZE-1); | 159 | sp = __get_SP() & (THREAD_SIZE-1); |
264 | 160 | ||
265 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { | 161 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { |
266 | printk("do_IRQ: stack overflow: %ld\n", | 162 | printk("do_IRQ: stack overflow: %ld\n", |
267 | sp - sizeof(struct thread_info)); | 163 | sp - sizeof(struct thread_info)); |
268 | dump_stack(); | 164 | dump_stack(); |
269 | } | 165 | } |
270 | } | 166 | } |
271 | #endif | 167 | #endif |
272 | 168 | ||
273 | lpaca = get_paca(); | 169 | lpaca = get_paca(); |
274 | #ifdef CONFIG_SMP | 170 | #ifdef CONFIG_SMP |
275 | if (lpaca->lppaca.int_dword.fields.ipi_cnt) { | 171 | if (lpaca->lppaca.int_dword.fields.ipi_cnt) { |
276 | lpaca->lppaca.int_dword.fields.ipi_cnt = 0; | 172 | lpaca->lppaca.int_dword.fields.ipi_cnt = 0; |
277 | iSeries_smp_message_recv(regs); | 173 | iSeries_smp_message_recv(regs); |
278 | } | 174 | } |
279 | #endif /* CONFIG_SMP */ | 175 | #endif /* CONFIG_SMP */ |
280 | if (hvlpevent_is_pending()) | 176 | if (hvlpevent_is_pending()) |
281 | process_hvlpevents(regs); | 177 | process_hvlpevents(regs); |
282 | 178 | ||
283 | irq_exit(); | 179 | irq_exit(); |
284 | 180 | ||
285 | if (lpaca->lppaca.int_dword.fields.decr_int) { | 181 | if (lpaca->lppaca.int_dword.fields.decr_int) { |
286 | lpaca->lppaca.int_dword.fields.decr_int = 0; | 182 | lpaca->lppaca.int_dword.fields.decr_int = 0; |
287 | /* Signal a fake decrementer interrupt */ | 183 | /* Signal a fake decrementer interrupt */ |
288 | timer_interrupt(regs); | 184 | timer_interrupt(regs); |
289 | } | 185 | } |
290 | } | 186 | } |
291 | 187 | ||
292 | #else /* CONFIG_PPC_ISERIES */ | 188 | #else /* CONFIG_PPC_ISERIES */ |
293 | 189 | ||
294 | void do_IRQ(struct pt_regs *regs) | 190 | void do_IRQ(struct pt_regs *regs) |
295 | { | 191 | { |
296 | int irq; | 192 | int irq; |
297 | #ifdef CONFIG_IRQSTACKS | 193 | #ifdef CONFIG_IRQSTACKS |
298 | struct thread_info *curtp, *irqtp; | 194 | struct thread_info *curtp, *irqtp; |
299 | #endif | 195 | #endif |
300 | 196 | ||
301 | irq_enter(); | 197 | irq_enter(); |
302 | 198 | ||
303 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 199 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
304 | /* Debugging check for stack overflow: is there less than 2KB free? */ | 200 | /* Debugging check for stack overflow: is there less than 2KB free? */ |
305 | { | 201 | { |
306 | long sp; | 202 | long sp; |
307 | 203 | ||
308 | sp = __get_SP() & (THREAD_SIZE-1); | 204 | sp = __get_SP() & (THREAD_SIZE-1); |
309 | 205 | ||
310 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { | 206 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { |
311 | printk("do_IRQ: stack overflow: %ld\n", | 207 | printk("do_IRQ: stack overflow: %ld\n", |
312 | sp - sizeof(struct thread_info)); | 208 | sp - sizeof(struct thread_info)); |
313 | dump_stack(); | 209 | dump_stack(); |
314 | } | 210 | } |
315 | } | 211 | } |
316 | #endif | 212 | #endif |
317 | 213 | ||
318 | irq = ppc_md.get_irq(regs); | 214 | irq = ppc_md.get_irq(regs); |
319 | 215 | ||
320 | if (irq >= 0) { | 216 | if (irq >= 0) { |
321 | #ifdef CONFIG_IRQSTACKS | 217 | #ifdef CONFIG_IRQSTACKS |
322 | /* Switch to the irq stack to handle this */ | 218 | /* Switch to the irq stack to handle this */ |
323 | curtp = current_thread_info(); | 219 | curtp = current_thread_info(); |
324 | irqtp = hardirq_ctx[smp_processor_id()]; | 220 | irqtp = hardirq_ctx[smp_processor_id()]; |
325 | if (curtp != irqtp) { | 221 | if (curtp != irqtp) { |
326 | irqtp->task = curtp->task; | 222 | irqtp->task = curtp->task; |
327 | irqtp->flags = 0; | 223 | irqtp->flags = 0; |
328 | call_ppc_irq_dispatch_handler(regs, irq, irqtp); | 224 | call___do_IRQ(irq, regs, irqtp); |
329 | irqtp->task = NULL; | 225 | irqtp->task = NULL; |
330 | if (irqtp->flags) | 226 | if (irqtp->flags) |
331 | set_bits(irqtp->flags, &curtp->flags); | 227 | set_bits(irqtp->flags, &curtp->flags); |
332 | } else | 228 | } else |
333 | #endif | 229 | #endif |
334 | ppc_irq_dispatch_handler(regs, irq); | 230 | __do_IRQ(irq, regs); |
335 | } else | 231 | } else |
336 | /* That's not SMP safe ... but who cares ? */ | 232 | /* That's not SMP safe ... but who cares ? */ |
337 | ppc_spurious_interrupts++; | 233 | ppc_spurious_interrupts++; |
338 | 234 | ||
339 | irq_exit(); | 235 | irq_exit(); |
340 | } | 236 | } |
341 | #endif /* CONFIG_PPC_ISERIES */ | 237 | #endif /* CONFIG_PPC_ISERIES */ |
342 | 238 | ||
343 | void __init init_IRQ(void) | 239 | void __init init_IRQ(void) |
344 | { | 240 | { |
345 | static int once = 0; | 241 | static int once = 0; |
346 | 242 | ||
347 | if (once) | 243 | if (once) |
348 | return; | 244 | return; |
349 | 245 | ||
350 | once++; | 246 | once++; |
351 | 247 | ||
352 | ppc_md.init_IRQ(); | 248 | ppc_md.init_IRQ(); |
353 | irq_ctx_init(); | 249 | irq_ctx_init(); |
354 | } | 250 | } |
355 | 251 | ||
356 | #ifndef CONFIG_PPC_ISERIES | 252 | #ifndef CONFIG_PPC_ISERIES |
357 | /* | 253 | /* |
358 | * Virtual IRQ mapping code, used on systems with XICS interrupt controllers. | 254 | * Virtual IRQ mapping code, used on systems with XICS interrupt controllers. |
359 | */ | 255 | */ |
360 | 256 | ||
361 | #define UNDEFINED_IRQ 0xffffffff | 257 | #define UNDEFINED_IRQ 0xffffffff |
362 | unsigned int virt_irq_to_real_map[NR_IRQS]; | 258 | unsigned int virt_irq_to_real_map[NR_IRQS]; |
363 | 259 | ||
364 | /* | 260 | /* |
365 | * Don't use virtual irqs 0, 1, 2 for devices. | 261 | * Don't use virtual irqs 0, 1, 2 for devices. |
366 | * The pcnet32 driver considers interrupt numbers < 2 to be invalid, | 262 | * The pcnet32 driver considers interrupt numbers < 2 to be invalid, |
367 | * and 2 is the XICS IPI interrupt. | 263 | * and 2 is the XICS IPI interrupt. |
368 | * We limit virtual irqs to 17 less than NR_IRQS so that when we | 264 | * We limit virtual irqs to 17 less than NR_IRQS so that when we |
369 | * offset them by 16 (to reserve the first 16 for ISA interrupts) | 265 | * offset them by 16 (to reserve the first 16 for ISA interrupts) |
370 | * we don't end up with an interrupt number >= NR_IRQS. | 266 | * we don't end up with an interrupt number >= NR_IRQS. |
371 | */ | 267 | */ |
372 | #define MIN_VIRT_IRQ 3 | 268 | #define MIN_VIRT_IRQ 3 |
373 | #define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1) | 269 | #define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1) |
374 | #define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1) | 270 | #define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1) |
375 | 271 | ||
376 | void | 272 | void |
377 | virt_irq_init(void) | 273 | virt_irq_init(void) |
378 | { | 274 | { |
379 | int i; | 275 | int i; |
380 | for (i = 0; i < NR_IRQS; i++) | 276 | for (i = 0; i < NR_IRQS; i++) |
381 | virt_irq_to_real_map[i] = UNDEFINED_IRQ; | 277 | virt_irq_to_real_map[i] = UNDEFINED_IRQ; |
382 | } | 278 | } |
383 | 279 | ||
384 | /* Create a mapping for a real_irq if it doesn't already exist. | 280 | /* Create a mapping for a real_irq if it doesn't already exist. |
385 | * Return the virtual irq as a convenience. | 281 | * Return the virtual irq as a convenience. |
386 | */ | 282 | */ |
387 | int virt_irq_create_mapping(unsigned int real_irq) | 283 | int virt_irq_create_mapping(unsigned int real_irq) |
388 | { | 284 | { |
389 | unsigned int virq, first_virq; | 285 | unsigned int virq, first_virq; |
390 | static int warned; | 286 | static int warned; |
391 | 287 | ||
392 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | 288 | if (ppc64_interrupt_controller == IC_OPEN_PIC) |
393 | return real_irq; /* no mapping for openpic (for now) */ | 289 | return real_irq; /* no mapping for openpic (for now) */ |
394 | 290 | ||
395 | if (ppc64_interrupt_controller == IC_CELL_PIC) | 291 | if (ppc64_interrupt_controller == IC_CELL_PIC) |
396 | return real_irq; /* no mapping for iic either */ | 292 | return real_irq; /* no mapping for iic either */ |
397 | 293 | ||
398 | /* don't map interrupts < MIN_VIRT_IRQ */ | 294 | /* don't map interrupts < MIN_VIRT_IRQ */ |
399 | if (real_irq < MIN_VIRT_IRQ) { | 295 | if (real_irq < MIN_VIRT_IRQ) { |
400 | virt_irq_to_real_map[real_irq] = real_irq; | 296 | virt_irq_to_real_map[real_irq] = real_irq; |
401 | return real_irq; | 297 | return real_irq; |
402 | } | 298 | } |
403 | 299 | ||
404 | /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */ | 300 | /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */ |
405 | virq = real_irq; | 301 | virq = real_irq; |
406 | if (virq > MAX_VIRT_IRQ) | 302 | if (virq > MAX_VIRT_IRQ) |
407 | virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; | 303 | virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; |
408 | 304 | ||
409 | /* search for this number or a free slot */ | 305 | /* search for this number or a free slot */ |
410 | first_virq = virq; | 306 | first_virq = virq; |
411 | while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { | 307 | while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { |
412 | if (virt_irq_to_real_map[virq] == real_irq) | 308 | if (virt_irq_to_real_map[virq] == real_irq) |
413 | return virq; | 309 | return virq; |
414 | if (++virq > MAX_VIRT_IRQ) | 310 | if (++virq > MAX_VIRT_IRQ) |
415 | virq = MIN_VIRT_IRQ; | 311 | virq = MIN_VIRT_IRQ; |
416 | if (virq == first_virq) | 312 | if (virq == first_virq) |
417 | goto nospace; /* oops, no free slots */ | 313 | goto nospace; /* oops, no free slots */ |
418 | } | 314 | } |
419 | 315 | ||
420 | virt_irq_to_real_map[virq] = real_irq; | 316 | virt_irq_to_real_map[virq] = real_irq; |
421 | return virq; | 317 | return virq; |
422 | 318 | ||
423 | nospace: | 319 | nospace: |
424 | if (!warned) { | 320 | if (!warned) { |
425 | printk(KERN_CRIT "Interrupt table is full\n"); | 321 | printk(KERN_CRIT "Interrupt table is full\n"); |
426 | printk(KERN_CRIT "Increase NR_IRQS (currently %d) " | 322 | printk(KERN_CRIT "Increase NR_IRQS (currently %d) " |
427 | "in your kernel sources and rebuild.\n", NR_IRQS); | 323 | "in your kernel sources and rebuild.\n", NR_IRQS); |
428 | warned = 1; | 324 | warned = 1; |
429 | } | 325 | } |
430 | return NO_IRQ; | 326 | return NO_IRQ; |
431 | } | 327 | } |
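
A worked example of the folding above, under illustrative values NR_IRQS = 512 and NUM_ISA_INTERRUPTS = 16, so MIN_VIRT_IRQ = 3, MAX_VIRT_IRQ = 495 and NR_VIRT_IRQS = 493, on a system whose controller actually uses the map (i.e. XICS):

	/* real_irq 0x1234 (4660) exceeds MAX_VIRT_IRQ, so it folds to
	 *   4660 % 493 + 3 == 223 + 3 == 226,
	 * with linear probing from slot 226 on collision.  Constants
	 * here are illustrative; the real ones are config-dependent.
	 */
	virt_irq_init();
	BUG_ON(virt_irq_create_mapping(0x1234) != 226);	/* allocates slot 226 */
	BUG_ON(virt_irq_create_mapping(0x1234) != 226);	/* idempotent lookup */
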
432 | 328 | ||
433 | /* | 329 | /* |
434 | * In most cases we will get a hit on the very first slot checked in the | 330 | * In most cases we will get a hit on the very first slot checked in the |
435 | * virt_irq_to_real_map. Only when there are a large number of | 331 | * virt_irq_to_real_map. Only when there are a large number of |
436 | * IRQs will this be expensive. | 332 | * IRQs will this be expensive. |
437 | */ | 333 | */ |
438 | unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) | 334 | unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) |
439 | { | 335 | { |
440 | unsigned int virq; | 336 | unsigned int virq; |
441 | unsigned int first_virq; | 337 | unsigned int first_virq; |
442 | 338 | ||
443 | virq = real_irq; | 339 | virq = real_irq; |
444 | 340 | ||
445 | if (virq > MAX_VIRT_IRQ) | 341 | if (virq > MAX_VIRT_IRQ) |
446 | virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; | 342 | virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; |
447 | 343 | ||
448 | first_virq = virq; | 344 | first_virq = virq; |
449 | 345 | ||
450 | do { | 346 | do { |
451 | if (virt_irq_to_real_map[virq] == real_irq) | 347 | if (virt_irq_to_real_map[virq] == real_irq) |
452 | return virq; | 348 | return virq; |
453 | 349 | ||
454 | virq++; | 350 | virq++; |
455 | 351 | ||
456 | if (virq >= MAX_VIRT_IRQ) | 352 | if (virq >= MAX_VIRT_IRQ) |
457 | virq = 0; | 353 | virq = 0; |
458 | 354 | ||
459 | } while (first_virq != virq); | 355 | } while (first_virq != virq); |
460 | 356 | ||
461 | return NO_IRQ; | 357 | return NO_IRQ; |
462 | 358 | ||
463 | } | 359 | } |
464 | 360 | ||
465 | #endif /* CONFIG_PPC_ISERIES */ | 361 | #endif /* CONFIG_PPC_ISERIES */ |
466 | 362 | ||
467 | #ifdef CONFIG_IRQSTACKS | 363 | #ifdef CONFIG_IRQSTACKS |
468 | struct thread_info *softirq_ctx[NR_CPUS]; | 364 | struct thread_info *softirq_ctx[NR_CPUS]; |
469 | struct thread_info *hardirq_ctx[NR_CPUS]; | 365 | struct thread_info *hardirq_ctx[NR_CPUS]; |
470 | 366 | ||
471 | void irq_ctx_init(void) | 367 | void irq_ctx_init(void) |
472 | { | 368 | { |
473 | struct thread_info *tp; | 369 | struct thread_info *tp; |
474 | int i; | 370 | int i; |
475 | 371 | ||
476 | for_each_cpu(i) { | 372 | for_each_cpu(i) { |
477 | memset((void *)softirq_ctx[i], 0, THREAD_SIZE); | 373 | memset((void *)softirq_ctx[i], 0, THREAD_SIZE); |
478 | tp = softirq_ctx[i]; | 374 | tp = softirq_ctx[i]; |
479 | tp->cpu = i; | 375 | tp->cpu = i; |
480 | tp->preempt_count = SOFTIRQ_OFFSET; | 376 | tp->preempt_count = SOFTIRQ_OFFSET; |
481 | 377 | ||
482 | memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); | 378 | memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); |
483 | tp = hardirq_ctx[i]; | 379 | tp = hardirq_ctx[i]; |
484 | tp->cpu = i; | 380 | tp->cpu = i; |
485 | tp->preempt_count = HARDIRQ_OFFSET; | 381 | tp->preempt_count = HARDIRQ_OFFSET; |
486 | } | 382 | } |
487 | } | 383 | } |
488 | 384 | ||
489 | void do_softirq(void) | 385 | void do_softirq(void) |
490 | { | 386 | { |
491 | unsigned long flags; | 387 | unsigned long flags; |
492 | struct thread_info *curtp, *irqtp; | 388 | struct thread_info *curtp, *irqtp; |
493 | 389 | ||
494 | if (in_interrupt()) | 390 | if (in_interrupt()) |
495 | return; | 391 | return; |
496 | 392 | ||
497 | local_irq_save(flags); | 393 | local_irq_save(flags); |
498 | 394 | ||
499 | if (local_softirq_pending()) { | 395 | if (local_softirq_pending()) { |
500 | curtp = current_thread_info(); | 396 | curtp = current_thread_info(); |
501 | irqtp = softirq_ctx[smp_processor_id()]; | 397 | irqtp = softirq_ctx[smp_processor_id()]; |
502 | irqtp->task = curtp->task; | 398 | irqtp->task = curtp->task; |
503 | call_do_softirq(irqtp); | 399 | call_do_softirq(irqtp); |
504 | irqtp->task = NULL; | 400 | irqtp->task = NULL; |
505 | } | 401 | } |
506 | 402 | ||
507 | local_irq_restore(flags); | 403 | local_irq_restore(flags); |
508 | } | 404 | } |
509 | EXPORT_SYMBOL(do_softirq); | 405 | EXPORT_SYMBOL(do_softirq); |
510 | 406 | ||
511 | #endif /* CONFIG_IRQSTACKS */ | 407 | #endif /* CONFIG_IRQSTACKS */ |
512 | 408 | ||
513 | static int __init setup_noirqdistrib(char *str) | 409 | static int __init setup_noirqdistrib(char *str) |
514 | { | 410 | { |
515 | distribute_irqs = 0; | 411 | distribute_irqs = 0; |
516 | return 1; | 412 | return 1; |
517 | } | 413 | } |
518 | 414 | ||
519 | __setup("noirqdistrib", setup_noirqdistrib); | 415 | __setup("noirqdistrib", setup_noirqdistrib); |
520 | 416 |
arch/ppc64/kernel/misc.S
1 | /* | 1 | /* |
2 | * arch/ppc/kernel/misc.S | 2 | * arch/ppc/kernel/misc.S |
3 | * | 3 | * |
4 | * | 4 | * |
5 | * | 5 | * |
6 | * This file contains miscellaneous low-level functions. | 6 | * This file contains miscellaneous low-level functions. |
7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
8 | * | 8 | * |
9 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) | 9 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) |
10 | * and Paul Mackerras. | 10 | * and Paul Mackerras. |
11 | * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) | 11 | * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) |
12 | * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) | 12 | * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or | 14 | * This program is free software; you can redistribute it and/or |
15 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
18 | * | 18 | * |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/config.h> | 21 | #include <linux/config.h> |
22 | #include <linux/sys.h> | 22 | #include <linux/sys.h> |
23 | #include <asm/unistd.h> | 23 | #include <asm/unistd.h> |
24 | #include <asm/errno.h> | 24 | #include <asm/errno.h> |
25 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
26 | #include <asm/page.h> | 26 | #include <asm/page.h> |
27 | #include <asm/cache.h> | 27 | #include <asm/cache.h> |
28 | #include <asm/ppc_asm.h> | 28 | #include <asm/ppc_asm.h> |
29 | #include <asm/asm-offsets.h> | 29 | #include <asm/asm-offsets.h> |
30 | #include <asm/cputable.h> | 30 | #include <asm/cputable.h> |
31 | #include <asm/thread_info.h> | 31 | #include <asm/thread_info.h> |
32 | 32 | ||
33 | .text | 33 | .text |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Returns (address we were linked at) - (address we are running at) | 36 | * Returns (address we were linked at) - (address we are running at) |
37 | * for use before the text and data are mapped to KERNELBASE. | 37 | * for use before the text and data are mapped to KERNELBASE. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | _GLOBAL(reloc_offset) | 40 | _GLOBAL(reloc_offset) |
41 | mflr r0 | 41 | mflr r0 |
42 | bl 1f | 42 | bl 1f |
43 | 1: mflr r3 | 43 | 1: mflr r3 |
44 | LOADADDR(r4,1b) | 44 | LOADADDR(r4,1b) |
45 | sub r3,r4,r3 | 45 | sub r3,r4,r3 |
46 | mtlr r0 | 46 | mtlr r0 |
47 | blr | 47 | blr |
48 | 48 | ||
49 | _GLOBAL(get_msr) | 49 | _GLOBAL(get_msr) |
50 | mfmsr r3 | 50 | mfmsr r3 |
51 | blr | 51 | blr |
52 | 52 | ||
53 | _GLOBAL(get_dar) | 53 | _GLOBAL(get_dar) |
54 | mfdar r3 | 54 | mfdar r3 |
55 | blr | 55 | blr |
56 | 56 | ||
57 | _GLOBAL(get_srr0) | 57 | _GLOBAL(get_srr0) |
58 | mfsrr0 r3 | 58 | mfsrr0 r3 |
59 | blr | 59 | blr |
60 | 60 | ||
61 | _GLOBAL(get_srr1) | 61 | _GLOBAL(get_srr1) |
62 | mfsrr1 r3 | 62 | mfsrr1 r3 |
63 | blr | 63 | blr |
64 | 64 | ||
65 | _GLOBAL(get_sp) | 65 | _GLOBAL(get_sp) |
66 | mr r3,r1 | 66 | mr r3,r1 |
67 | blr | 67 | blr |
68 | 68 | ||
69 | #ifdef CONFIG_IRQSTACKS | 69 | #ifdef CONFIG_IRQSTACKS |
70 | _GLOBAL(call_do_softirq) | 70 | _GLOBAL(call_do_softirq) |
71 | mflr r0 | 71 | mflr r0 |
72 | std r0,16(r1) | 72 | std r0,16(r1) |
73 | stdu r1,THREAD_SIZE-112(r3) | 73 | stdu r1,THREAD_SIZE-112(r3) |
74 | mr r1,r3 | 74 | mr r1,r3 |
75 | bl .__do_softirq | 75 | bl .__do_softirq |
76 | ld r1,0(r1) | 76 | ld r1,0(r1) |
77 | ld r0,16(r1) | 77 | ld r0,16(r1) |
78 | mtlr r0 | 78 | mtlr r0 |
79 | blr | 79 | blr |
80 | 80 | ||
81 | _GLOBAL(call_ppc_irq_dispatch_handler) | 81 | _GLOBAL(call___do_IRQ) |
82 | mflr r0 | 82 | mflr r0 |
83 | std r0,16(r1) | 83 | std r0,16(r1) |
84 | stdu r1,THREAD_SIZE-112(r5) | 84 | stdu r1,THREAD_SIZE-112(r5) |
85 | mr r1,r5 | 85 | mr r1,r5 |
86 | bl .ppc_irq_dispatch_handler | 86 | bl .__do_IRQ |
87 | ld r1,0(r1) | 87 | ld r1,0(r1) |
88 | ld r0,16(r1) | 88 | ld r0,16(r1) |
89 | mtlr r0 | 89 | mtlr r0 |
90 | blr | 90 | blr |
91 | #endif /* CONFIG_IRQSTACKS */ | 91 | #endif /* CONFIG_IRQSTACKS */ |
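
The C caller in arch/ppc64/kernel/irq.c sees the helper above through a declaration along these lines (a sketch of the header change implied by the rename; the argument order matches the register usage, r3 = irq, r4 = regs, r5 = irq stack):

	/* The asm saves the old r1 in the irq stack's backchain, switches
	 * r1 to irqtp, calls __do_IRQ(irq, regs), then restores r1.
	 */
	extern void call___do_IRQ(int irq, struct pt_regs *regs,
				  struct thread_info *irqtp);
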
92 | 92 | ||
93 | /* | 93 | /* |
94 | * To be called by C code which needs to do some operations with MMU | 94 | * To be called by C code which needs to do some operations with MMU |
95 | * disabled. Note that interrupts have to be disabled by the caller | 95 | * disabled. Note that interrupts have to be disabled by the caller |
96 | * prior to calling us. The code called _MUST_ be in the RMO of course | 96 | * prior to calling us. The code called _MUST_ be in the RMO of course |
97 | * and part of the linear mapping as we don't attempt to translate the | 97 | * and part of the linear mapping as we don't attempt to translate the |
98 | * stack pointer at all. The function is called with the stack switched | 98 | * stack pointer at all. The function is called with the stack switched |
99 | * to this CPU's emergency stack | 99 | * to this CPU's emergency stack |
100 | * | 100 | * |
101 | * prototype is void *call_with_mmu_off(void *func, void *data); | 101 | * prototype is void *call_with_mmu_off(void *func, void *data); |
102 | * | 102 | * |
103 | * the called function is expected to be of the form | 103 | * the called function is expected to be of the form |
104 | * | 104 | * |
105 | * void *called(void *data); | 105 | * void *called(void *data); |
106 | */ | 106 | */ |
107 | _GLOBAL(call_with_mmu_off) | 107 | _GLOBAL(call_with_mmu_off) |
108 | mflr r0 /* get link, save it on stackframe */ | 108 | mflr r0 /* get link, save it on stackframe */ |
109 | std r0,16(r1) | 109 | std r0,16(r1) |
110 | mr r5,r1 /* save old stack ptr */ | 110 | mr r5,r1 /* save old stack ptr */ |
111 | ld r1,PACAEMERGSP(r13) /* get emerg. stack */ | 111 | ld r1,PACAEMERGSP(r13) /* get emerg. stack */ |
112 | subi r1,r1,STACK_FRAME_OVERHEAD | 112 | subi r1,r1,STACK_FRAME_OVERHEAD |
113 | std r0,16(r1) /* save link on emerg. stack */ | 113 | std r0,16(r1) /* save link on emerg. stack */ |
114 | std r5,0(r1) /* save old stack ptr in backchain */ | 114 | std r5,0(r1) /* save old stack ptr in backchain */ |
115 | ld r3,0(r3) /* get to real function ptr (assume same TOC) */ | 115 | ld r3,0(r3) /* get to real function ptr (assume same TOC) */ |
116 | bl 2f /* we need LR to return, continue at label 2 */ | 116 | bl 2f /* we need LR to return, continue at label 2 */ |
117 | 117 | ||
118 | ld r0,16(r1) /* we return here from the call, get LR and */ | 118 | ld r0,16(r1) /* we return here from the call, get LR and */ |
119 | ld r1,0(r1) /* .. old stack ptr */ | 119 | ld r1,0(r1) /* .. old stack ptr */ |
120 | mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */ | 120 | mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */ |
121 | mfmsr r4 | 121 | mfmsr r4 |
122 | ori r4,r4,MSR_IR|MSR_DR | 122 | ori r4,r4,MSR_IR|MSR_DR |
123 | mtspr SPRN_SRR1,r4 | 123 | mtspr SPRN_SRR1,r4 |
124 | rfid | 124 | rfid |
125 | 125 | ||
126 | 2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */ | 126 | 2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */ |
127 | mr r3,r4 /* get parameter */ | 127 | mr r3,r4 /* get parameter */ |
128 | mfmsr r0 | 128 | mfmsr r0 |
129 | ori r0,r0,MSR_IR|MSR_DR | 129 | ori r0,r0,MSR_IR|MSR_DR |
130 | xori r0,r0,MSR_IR|MSR_DR | 130 | xori r0,r0,MSR_IR|MSR_DR |
131 | mtspr SPRN_SRR1,r0 | 131 | mtspr SPRN_SRR1,r0 |
132 | rfid | 132 | rfid |
133 | 133 | ||
134 | 134 | ||
135 | .section ".toc","aw" | 135 | .section ".toc","aw" |
136 | PPC64_CACHES: | 136 | PPC64_CACHES: |
137 | .tc ppc64_caches[TC],ppc64_caches | 137 | .tc ppc64_caches[TC],ppc64_caches |
138 | .section ".text" | 138 | .section ".text" |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Write any modified data cache blocks out to memory | 141 | * Write any modified data cache blocks out to memory |
142 | * and invalidate the corresponding instruction cache blocks. | 142 | * and invalidate the corresponding instruction cache blocks. |
143 | * | 143 | * |
144 | * flush_icache_range(unsigned long start, unsigned long stop) | 144 | * flush_icache_range(unsigned long start, unsigned long stop) |
145 | * | 145 | * |
146 | * flush all bytes from start through stop-1 inclusive | 146 | * flush all bytes from start through stop-1 inclusive |
147 | */ | 147 | */ |
148 | 148 | ||
149 | _KPROBE(__flush_icache_range) | 149 | _KPROBE(__flush_icache_range) |
150 | 150 | ||
151 | /* | 151 | /* |
152 | * Flush the data cache to memory | 152 | * Flush the data cache to memory |
153 | * | 153 | * |
154 | * Different systems have different cache line sizes | 154 | * Different systems have different cache line sizes |
155 | * and in some cases i-cache and d-cache line sizes differ from | 155 | * and in some cases i-cache and d-cache line sizes differ from |
156 | * each other. | 156 | * each other. |
157 | */ | 157 | */ |
158 | ld r10,PPC64_CACHES@toc(r2) | 158 | ld r10,PPC64_CACHES@toc(r2) |
159 | lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */ | 159 | lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */ |
160 | addi r5,r7,-1 | 160 | addi r5,r7,-1 |
161 | andc r6,r3,r5 /* round low to line bdy */ | 161 | andc r6,r3,r5 /* round low to line bdy */ |
162 | subf r8,r6,r4 /* compute length */ | 162 | subf r8,r6,r4 /* compute length */ |
163 | add r8,r8,r5 /* ensure we get enough */ | 163 | add r8,r8,r5 /* ensure we get enough */ |
164 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */ | 164 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */ |
165 | srw. r8,r8,r9 /* compute line count */ | 165 | srw. r8,r8,r9 /* compute line count */ |
166 | beqlr /* nothing to do? */ | 166 | beqlr /* nothing to do? */ |
167 | mtctr r8 | 167 | mtctr r8 |
168 | 1: dcbst 0,r6 | 168 | 1: dcbst 0,r6 |
169 | add r6,r6,r7 | 169 | add r6,r6,r7 |
170 | bdnz 1b | 170 | bdnz 1b |
171 | sync | 171 | sync |
172 | 172 | ||
173 | /* Now invalidate the instruction cache */ | 173 | /* Now invalidate the instruction cache */ |
174 | 174 | ||
175 | lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */ | 175 | lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */ |
176 | addi r5,r7,-1 | 176 | addi r5,r7,-1 |
177 | andc r6,r3,r5 /* round low to line bdy */ | 177 | andc r6,r3,r5 /* round low to line bdy */ |
178 | subf r8,r6,r4 /* compute length */ | 178 | subf r8,r6,r4 /* compute length */ |
179 | add r8,r8,r5 | 179 | add r8,r8,r5 |
180 | lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */ | 180 | lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */ |
181 | srw. r8,r8,r9 /* compute line count */ | 181 | srw. r8,r8,r9 /* compute line count */ |
182 | beqlr /* nothing to do? */ | 182 | beqlr /* nothing to do? */ |
183 | mtctr r8 | 183 | mtctr r8 |
184 | 2: icbi 0,r6 | 184 | 2: icbi 0,r6 |
185 | add r6,r6,r7 | 185 | add r6,r6,r7 |
186 | bdnz 2b | 186 | bdnz 2b |
187 | isync | 187 | isync |
188 | blr | 188 | blr |
189 | .previous .text | 189 | .previous .text |
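
A typical caller of the routine above, via the flush_icache_range() interface, is code that patches kernel text. A minimal usage sketch (patch_one_insn, addr and new_insn are illustrative names, not kernel API):

	/* Store the new instruction through the d-cache, then make it
	 * visible to instruction fetch -- the dcbst/icbi sequence the
	 * assembly above performs over the affected cache lines.
	 */
	static void patch_one_insn(unsigned int *addr, unsigned int new_insn)
	{
		*addr = new_insn;
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + sizeof(*addr));
	}
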
190 | /* | 190 | /* |
191 | * Like above, but only do the D-cache. | 191 | * Like above, but only do the D-cache. |
192 | * | 192 | * |
193 | * flush_dcache_range(unsigned long start, unsigned long stop) | 193 | * flush_dcache_range(unsigned long start, unsigned long stop) |
194 | * | 194 | * |
195 | * flush all bytes from start to stop-1 inclusive | 195 | * flush all bytes from start to stop-1 inclusive |
196 | */ | 196 | */ |
197 | _GLOBAL(flush_dcache_range) | 197 | _GLOBAL(flush_dcache_range) |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * Flush the data cache to memory | 200 | * Flush the data cache to memory |
201 | * | 201 | * |
202 | * Different systems have different cache line sizes | 202 | * Different systems have different cache line sizes |
203 | */ | 203 | */ |
204 | ld r10,PPC64_CACHES@toc(r2) | 204 | ld r10,PPC64_CACHES@toc(r2) |
205 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ | 205 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ |
206 | addi r5,r7,-1 | 206 | addi r5,r7,-1 |
207 | andc r6,r3,r5 /* round low to line bdy */ | 207 | andc r6,r3,r5 /* round low to line bdy */ |
208 | subf r8,r6,r4 /* compute length */ | 208 | subf r8,r6,r4 /* compute length */ |
209 | add r8,r8,r5 /* ensure we get enough */ | 209 | add r8,r8,r5 /* ensure we get enough */ |
210 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ | 210 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ |
211 | srw. r8,r8,r9 /* compute line count */ | 211 | srw. r8,r8,r9 /* compute line count */ |
212 | beqlr /* nothing to do? */ | 212 | beqlr /* nothing to do? */ |
213 | mtctr r8 | 213 | mtctr r8 |
214 | 0: dcbst 0,r6 | 214 | 0: dcbst 0,r6 |
215 | add r6,r6,r7 | 215 | add r6,r6,r7 |
216 | bdnz 0b | 216 | bdnz 0b |
217 | sync | 217 | sync |
218 | blr | 218 | blr |
219 | 219 | ||
220 | /* | 220 | /* |
221 | * Like above, but works on non-mapped physical addresses. | 221 | * Like above, but works on non-mapped physical addresses. |
222 | * Use only for non-LPAR setups! It also assumes real mode | 222 | * Use only for non-LPAR setups! It also assumes real mode |
223 | * is cacheable. Used for flushing out the DART before using | 223 | * is cacheable. Used for flushing out the DART before using |
224 | * it as uncacheable memory | 224 | * it as uncacheable memory |
225 | * | 225 | * |
226 | * flush_dcache_phys_range(unsigned long start, unsigned long stop) | 226 | * flush_dcache_phys_range(unsigned long start, unsigned long stop) |
227 | * | 227 | * |
228 | * flush all bytes from start to stop-1 inclusive | 228 | * flush all bytes from start to stop-1 inclusive |
229 | */ | 229 | */ |
230 | _GLOBAL(flush_dcache_phys_range) | 230 | _GLOBAL(flush_dcache_phys_range) |
231 | ld r10,PPC64_CACHES@toc(r2) | 231 | ld r10,PPC64_CACHES@toc(r2) |
232 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ | 232 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ |
233 | addi r5,r7,-1 | 233 | addi r5,r7,-1 |
234 | andc r6,r3,r5 /* round low to line bdy */ | 234 | andc r6,r3,r5 /* round low to line bdy */ |
235 | subf r8,r6,r4 /* compute length */ | 235 | subf r8,r6,r4 /* compute length */ |
236 | add r8,r8,r5 /* ensure we get enough */ | 236 | add r8,r8,r5 /* ensure we get enough */ |
237 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ | 237 | lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */ |
238 | srw. r8,r8,r9 /* compute line count */ | 238 | srw. r8,r8,r9 /* compute line count */ |
239 | beqlr /* nothing to do? */ | 239 | beqlr /* nothing to do? */ |
240 | mfmsr r5 /* Disable MMU Data Relocation */ | 240 | mfmsr r5 /* Disable MMU Data Relocation */ |
241 | ori r0,r5,MSR_DR | 241 | ori r0,r5,MSR_DR |
242 | xori r0,r0,MSR_DR | 242 | xori r0,r0,MSR_DR |
243 | sync | 243 | sync |
244 | mtmsr r0 | 244 | mtmsr r0 |
245 | sync | 245 | sync |
246 | isync | 246 | isync |
247 | mtctr r8 | 247 | mtctr r8 |
248 | 0: dcbst 0,r6 | 248 | 0: dcbst 0,r6 |
249 | add r6,r6,r7 | 249 | add r6,r6,r7 |
250 | bdnz 0b | 250 | bdnz 0b |
251 | sync | 251 | sync |
252 | isync | 252 | isync |
253 | mtmsr r5 /* Re-enable MMU Data Relocation */ | 253 | mtmsr r5 /* Re-enable MMU Data Relocation */ |
254 | sync | 254 | sync |
255 | isync | 255 | isync |
256 | blr | 256 | blr |
257 | 257 | ||
258 | _GLOBAL(flush_inval_dcache_range) | 258 | _GLOBAL(flush_inval_dcache_range) |
259 | ld r10,PPC64_CACHES@toc(r2) | 259 | ld r10,PPC64_CACHES@toc(r2) |
260 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ | 260 | lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */ |
261 | addi r5,r7,-1 | 261 | addi r5,r7,-1 |
262 | andc r6,r3,r5 /* round low to line bdy */ | 262 | andc r6,r3,r5 /* round low to line bdy */ |
263 | subf r8,r6,r4 /* compute length */ | 263 | subf r8,r6,r4 /* compute length */ |
264 | add r8,r8,r5 /* ensure we get enough */ | 264 | add r8,r8,r5 /* ensure we get enough */ |
265 | lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */ | 265 | lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */ |
266 | srw. r8,r8,r9 /* compute line count */ | 266 | srw. r8,r8,r9 /* compute line count */ |
267 | beqlr /* nothing to do? */ | 267 | beqlr /* nothing to do? */ |
268 | sync | 268 | sync |
269 | isync | 269 | isync |
270 | mtctr r8 | 270 | mtctr r8 |
271 | 0: dcbf 0,r6 | 271 | 0: dcbf 0,r6 |
272 | add r6,r6,r7 | 272 | add r6,r6,r7 |
273 | bdnz 0b | 273 | bdnz 0b |
274 | sync | 274 | sync |
275 | isync | 275 | isync |
276 | blr | 276 | blr |
277 | 277 | ||
278 | 278 | ||
279 | /* | 279 | /* |
280 | * Flush a particular page from the data cache to RAM. | 280 | * Flush a particular page from the data cache to RAM. |
281 | * Note: this is necessary because the instruction cache does *not* | 281 | * Note: this is necessary because the instruction cache does *not* |
282 | * snoop from the data cache. | 282 | * snoop from the data cache. |
283 | * | 283 | * |
284 | * void __flush_dcache_icache(void *page) | 284 | * void __flush_dcache_icache(void *page) |
285 | */ | 285 | */ |
286 | _GLOBAL(__flush_dcache_icache) | 286 | _GLOBAL(__flush_dcache_icache) |
287 | /* | 287 | /* |
288 | * Flush the data cache to memory | 288 | * Flush the data cache to memory |
289 | * | 289 | * |
290 | * Different systems have different cache line sizes | 290 | * Different systems have different cache line sizes |
291 | */ | 291 | */ |
292 | 292 | ||
293 | /* Flush the dcache */ | 293 | /* Flush the dcache */ |
294 | ld r7,PPC64_CACHES@toc(r2) | 294 | ld r7,PPC64_CACHES@toc(r2) |
295 | clrrdi r3,r3,PAGE_SHIFT /* Page align */ | 295 | clrrdi r3,r3,PAGE_SHIFT /* Page align */ |
296 | lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ | 296 | lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ |
297 | lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ | 297 | lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ |
298 | mr r6,r3 | 298 | mr r6,r3 |
299 | mtctr r4 | 299 | mtctr r4 |
300 | 0: dcbst 0,r6 | 300 | 0: dcbst 0,r6 |
301 | add r6,r6,r5 | 301 | add r6,r6,r5 |
302 | bdnz 0b | 302 | bdnz 0b |
303 | sync | 303 | sync |
304 | 304 | ||
305 | /* Now invalidate the icache */ | 305 | /* Now invalidate the icache */ |
306 | 306 | ||
307 | lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */ | 307 | lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */ |
308 | lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */ | 308 | lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */ |
309 | mtctr r4 | 309 | mtctr r4 |
310 | 1: icbi 0,r3 | 310 | 1: icbi 0,r3 |
311 | add r3,r3,r5 | 311 | add r3,r3,r5 |
312 | bdnz 1b | 312 | bdnz 1b |
313 | isync | 313 | isync |
314 | blr | 314 | blr |
315 | 315 | ||
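As a usage sketch (the helper below is hypothetical; only __flush_dcache_icache() is real): after storing instructions into a page, the dcache must be flushed and the icache invalidated before that code can be fetched.

    #include <linux/string.h>

    extern void __flush_dcache_icache(void *page);

    /* Hypothetical helper: copy code into a page and make it fetchable. */
    static void publish_code_page(void *page, const void *src,
                                  unsigned long len)
    {
            memcpy(page, src, len);         /* instructions land in the dcache */
            __flush_dcache_icache(page);    /* write back, then invalidate icache */
    }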
316 | /* | 316 | /* |
317 | * I/O string operations | 317 | * I/O string operations |
318 | * | 318 | * |
319 | * insb(port, buf, len) | 319 | * insb(port, buf, len) |
320 | * outsb(port, buf, len) | 320 | * outsb(port, buf, len) |
321 | * insw(port, buf, len) | 321 | * insw(port, buf, len) |
322 | * outsw(port, buf, len) | 322 | * outsw(port, buf, len) |
323 | * insl(port, buf, len) | 323 | * insl(port, buf, len) |
324 | * outsl(port, buf, len) | 324 | * outsl(port, buf, len) |
325 | * insw_ns(port, buf, len) | 325 | * insw_ns(port, buf, len) |
326 | * outsw_ns(port, buf, len) | 326 | * outsw_ns(port, buf, len) |
327 | * insl_ns(port, buf, len) | 327 | * insl_ns(port, buf, len) |
328 | * outsl_ns(port, buf, len) | 328 | * outsl_ns(port, buf, len) |
329 | * | 329 | * |
330 | * The *_ns versions don't do byte-swapping. | 330 | * The *_ns versions don't do byte-swapping. |
331 | */ | 331 | */ |
332 | _GLOBAL(_insb) | 332 | _GLOBAL(_insb) |
333 | cmpwi 0,r5,0 | 333 | cmpwi 0,r5,0 |
334 | mtctr r5 | 334 | mtctr r5 |
335 | subi r4,r4,1 | 335 | subi r4,r4,1 |
336 | blelr- | 336 | blelr- |
337 | 00: lbz r5,0(r3) | 337 | 00: lbz r5,0(r3) |
338 | eieio | 338 | eieio |
339 | stbu r5,1(r4) | 339 | stbu r5,1(r4) |
340 | bdnz 00b | 340 | bdnz 00b |
341 | twi 0,r5,0 | 341 | twi 0,r5,0 |
342 | isync | 342 | isync |
343 | blr | 343 | blr |
344 | 344 | ||
345 | _GLOBAL(_outsb) | 345 | _GLOBAL(_outsb) |
346 | cmpwi 0,r5,0 | 346 | cmpwi 0,r5,0 |
347 | mtctr r5 | 347 | mtctr r5 |
348 | subi r4,r4,1 | 348 | subi r4,r4,1 |
349 | blelr- | 349 | blelr- |
350 | 00: lbzu r5,1(r4) | 350 | 00: lbzu r5,1(r4) |
351 | stb r5,0(r3) | 351 | stb r5,0(r3) |
352 | bdnz 00b | 352 | bdnz 00b |
353 | sync | 353 | sync |
354 | blr | 354 | blr |
355 | 355 | ||
356 | _GLOBAL(_insw) | 356 | _GLOBAL(_insw) |
357 | cmpwi 0,r5,0 | 357 | cmpwi 0,r5,0 |
358 | mtctr r5 | 358 | mtctr r5 |
359 | subi r4,r4,2 | 359 | subi r4,r4,2 |
360 | blelr- | 360 | blelr- |
361 | 00: lhbrx r5,0,r3 | 361 | 00: lhbrx r5,0,r3 |
362 | eieio | 362 | eieio |
363 | sthu r5,2(r4) | 363 | sthu r5,2(r4) |
364 | bdnz 00b | 364 | bdnz 00b |
365 | twi 0,r5,0 | 365 | twi 0,r5,0 |
366 | isync | 366 | isync |
367 | blr | 367 | blr |
368 | 368 | ||
369 | _GLOBAL(_outsw) | 369 | _GLOBAL(_outsw) |
370 | cmpwi 0,r5,0 | 370 | cmpwi 0,r5,0 |
371 | mtctr r5 | 371 | mtctr r5 |
372 | subi r4,r4,2 | 372 | subi r4,r4,2 |
373 | blelr- | 373 | blelr- |
374 | 00: lhzu r5,2(r4) | 374 | 00: lhzu r5,2(r4) |
375 | sthbrx r5,0,r3 | 375 | sthbrx r5,0,r3 |
376 | bdnz 00b | 376 | bdnz 00b |
377 | sync | 377 | sync |
378 | blr | 378 | blr |
379 | 379 | ||
380 | _GLOBAL(_insl) | 380 | _GLOBAL(_insl) |
381 | cmpwi 0,r5,0 | 381 | cmpwi 0,r5,0 |
382 | mtctr r5 | 382 | mtctr r5 |
383 | subi r4,r4,4 | 383 | subi r4,r4,4 |
384 | blelr- | 384 | blelr- |
385 | 00: lwbrx r5,0,r3 | 385 | 00: lwbrx r5,0,r3 |
386 | eieio | 386 | eieio |
387 | stwu r5,4(r4) | 387 | stwu r5,4(r4) |
388 | bdnz 00b | 388 | bdnz 00b |
389 | twi 0,r5,0 | 389 | twi 0,r5,0 |
390 | isync | 390 | isync |
391 | blr | 391 | blr |
392 | 392 | ||
393 | _GLOBAL(_outsl) | 393 | _GLOBAL(_outsl) |
394 | cmpwi 0,r5,0 | 394 | cmpwi 0,r5,0 |
395 | mtctr r5 | 395 | mtctr r5 |
396 | subi r4,r4,4 | 396 | subi r4,r4,4 |
397 | blelr- | 397 | blelr- |
398 | 00: lwzu r5,4(r4) | 398 | 00: lwzu r5,4(r4) |
399 | stwbrx r5,0,r3 | 399 | stwbrx r5,0,r3 |
400 | bdnz 00b | 400 | bdnz 00b |
401 | sync | 401 | sync |
402 | blr | 402 | blr |
403 | 403 | ||
404 | /* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */ | 404 | /* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */ |
405 | _GLOBAL(_insw_ns) | 405 | _GLOBAL(_insw_ns) |
406 | cmpwi 0,r5,0 | 406 | cmpwi 0,r5,0 |
407 | mtctr r5 | 407 | mtctr r5 |
408 | subi r4,r4,2 | 408 | subi r4,r4,2 |
409 | blelr- | 409 | blelr- |
410 | 00: lhz r5,0(r3) | 410 | 00: lhz r5,0(r3) |
411 | eieio | 411 | eieio |
412 | sthu r5,2(r4) | 412 | sthu r5,2(r4) |
413 | bdnz 00b | 413 | bdnz 00b |
414 | twi 0,r5,0 | 414 | twi 0,r5,0 |
415 | isync | 415 | isync |
416 | blr | 416 | blr |
417 | 417 | ||
418 | /* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */ | 418 | /* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */ |
419 | _GLOBAL(_outsw_ns) | 419 | _GLOBAL(_outsw_ns) |
420 | cmpwi 0,r5,0 | 420 | cmpwi 0,r5,0 |
421 | mtctr r5 | 421 | mtctr r5 |
422 | subi r4,r4,2 | 422 | subi r4,r4,2 |
423 | blelr- | 423 | blelr- |
424 | 00: lhzu r5,2(r4) | 424 | 00: lhzu r5,2(r4) |
425 | sth r5,0(r3) | 425 | sth r5,0(r3) |
426 | bdnz 00b | 426 | bdnz 00b |
427 | sync | 427 | sync |
428 | blr | 428 | blr |
429 | 429 | ||
430 | _GLOBAL(_insl_ns) | 430 | _GLOBAL(_insl_ns) |
431 | cmpwi 0,r5,0 | 431 | cmpwi 0,r5,0 |
432 | mtctr r5 | 432 | mtctr r5 |
433 | subi r4,r4,4 | 433 | subi r4,r4,4 |
434 | blelr- | 434 | blelr- |
435 | 00: lwz r5,0(r3) | 435 | 00: lwz r5,0(r3) |
436 | eieio | 436 | eieio |
437 | stwu r5,4(r4) | 437 | stwu r5,4(r4) |
438 | bdnz 00b | 438 | bdnz 00b |
439 | twi 0,r5,0 | 439 | twi 0,r5,0 |
440 | isync | 440 | isync |
441 | blr | 441 | blr |
442 | 442 | ||
443 | _GLOBAL(_outsl_ns) | 443 | _GLOBAL(_outsl_ns) |
444 | cmpwi 0,r5,0 | 444 | cmpwi 0,r5,0 |
445 | mtctr r5 | 445 | mtctr r5 |
446 | subi r4,r4,4 | 446 | subi r4,r4,4 |
447 | blelr- | 447 | blelr- |
448 | 00: lwzu r5,4(r4) | 448 | 00: lwzu r5,4(r4) |
449 | stw r5,0(r3) | 449 | stw r5,0(r3) |
450 | bdnz 00b | 450 | bdnz 00b |
451 | sync | 451 | sync |
452 | blr | 452 | blr |
453 | 453 | ||
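A hedged sketch of the byte-swapping distinction from the C side (prototypes approximated from the register usage above: r3 = port, r4 = buffer, r5 = count; the device below is hypothetical):

    extern void _insw(volatile u16 __iomem *port, void *buf, long count);
    extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);

    /* Hypothetical driver fragment: drain 64 halfwords from a FIFO. */
    static void read_fifo(volatile u16 __iomem *fifo, u16 *buf)
    {
            _insw(fifo, buf, 64);           /* little-endian device: lhbrx swaps */
            /* _insw_ns(fifo, buf, 64);        big-endian device: lhz, no swap */
    }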
454 | /* | 454 | /* |
455 | * identify_cpu: identify the CPU type and call setup_cpu | 455 | * identify_cpu: identify the CPU type and call setup_cpu |
456 | * In: r3 = base of the cpu_specs array | 456 | * In: r3 = base of the cpu_specs array |
457 | * r4 = address of cur_cpu_spec | 457 | * r4 = address of cur_cpu_spec |
458 | * r5 = relocation offset | 458 | * r5 = relocation offset |
459 | */ | 459 | */ |
460 | _GLOBAL(identify_cpu) | 460 | _GLOBAL(identify_cpu) |
461 | mfpvr r7 | 461 | mfpvr r7 |
462 | 1: | 462 | 1: |
463 | lwz r8,CPU_SPEC_PVR_MASK(r3) | 463 | lwz r8,CPU_SPEC_PVR_MASK(r3) |
464 | and r8,r8,r7 | 464 | and r8,r8,r7 |
465 | lwz r9,CPU_SPEC_PVR_VALUE(r3) | 465 | lwz r9,CPU_SPEC_PVR_VALUE(r3) |
466 | cmplw 0,r9,r8 | 466 | cmplw 0,r9,r8 |
467 | beq 1f | 467 | beq 1f |
468 | addi r3,r3,CPU_SPEC_ENTRY_SIZE | 468 | addi r3,r3,CPU_SPEC_ENTRY_SIZE |
469 | b 1b | 469 | b 1b |
470 | 1: | 470 | 1: |
471 | add r0,r3,r5 | 471 | add r0,r3,r5 |
472 | std r0,0(r4) | 472 | std r0,0(r4) |
473 | ld r4,CPU_SPEC_SETUP(r3) | 473 | ld r4,CPU_SPEC_SETUP(r3) |
474 | sub r4,r4,r5 | 474 | sub r4,r4,r5 |
475 | ld r4,0(r4) | 475 | ld r4,0(r4) |
476 | sub r4,r4,r5 | 476 | sub r4,r4,r5 |
477 | mtctr r4 | 477 | mtctr r4 |
478 | /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */ | 478 | /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */ |
479 | mr r4,r3 | 479 | mr r4,r3 |
480 | mr r3,r5 | 480 | mr r3,r5 |
481 | bctr | 481 | bctr |
482 | 482 | ||
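Restated as a C sketch (relocation arithmetic omitted; the field names mirror the CPU_SPEC_* offsets used above, as laid out for struct cpu_spec in cputable.h):

    /* Hedged sketch of the PVR match loop in identify_cpu. */
    static struct cpu_spec *identify_cpu_c(struct cpu_spec *specs,
                                           unsigned int pvr)
    {
            while ((pvr & specs->pvr_mask) != specs->pvr_value)
                    specs++;        /* the table ends in a catch-all entry */
            return specs;   /* the asm then sets cur_cpu_spec and
                             * tail-calls specs->cpu_setup(offset, specs) */
    }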
483 | /* | 483 | /* |
484 | * do_cpu_ftr_fixups - goes through the list of CPU feature fixups | 484 | * do_cpu_ftr_fixups - goes through the list of CPU feature fixups |
485 | * and writes nops over sections of code that don't apply to this cpu. | 485 | * and writes nops over sections of code that don't apply to this cpu. |
486 | * r3 = data offset (not changed) | 486 | * r3 = data offset (not changed) |
487 | */ | 487 | */ |
488 | _GLOBAL(do_cpu_ftr_fixups) | 488 | _GLOBAL(do_cpu_ftr_fixups) |
489 | /* Get CPU 0 features */ | 489 | /* Get CPU 0 features */ |
490 | LOADADDR(r6,cur_cpu_spec) | 490 | LOADADDR(r6,cur_cpu_spec) |
491 | sub r6,r6,r3 | 491 | sub r6,r6,r3 |
492 | ld r4,0(r6) | 492 | ld r4,0(r6) |
493 | sub r4,r4,r3 | 493 | sub r4,r4,r3 |
494 | ld r4,CPU_SPEC_FEATURES(r4) | 494 | ld r4,CPU_SPEC_FEATURES(r4) |
495 | /* Get the fixup table */ | 495 | /* Get the fixup table */ |
496 | LOADADDR(r6,__start___ftr_fixup) | 496 | LOADADDR(r6,__start___ftr_fixup) |
497 | sub r6,r6,r3 | 497 | sub r6,r6,r3 |
498 | LOADADDR(r7,__stop___ftr_fixup) | 498 | LOADADDR(r7,__stop___ftr_fixup) |
499 | sub r7,r7,r3 | 499 | sub r7,r7,r3 |
500 | /* Do the fixup */ | 500 | /* Do the fixup */ |
501 | 1: cmpld r6,r7 | 501 | 1: cmpld r6,r7 |
502 | bgelr | 502 | bgelr |
503 | addi r6,r6,32 | 503 | addi r6,r6,32 |
504 | ld r8,-32(r6) /* mask */ | 504 | ld r8,-32(r6) /* mask */ |
505 | and r8,r8,r4 | 505 | and r8,r8,r4 |
506 | ld r9,-24(r6) /* value */ | 506 | ld r9,-24(r6) /* value */ |
507 | cmpld r8,r9 | 507 | cmpld r8,r9 |
508 | beq 1b | 508 | beq 1b |
509 | ld r8,-16(r6) /* section begin */ | 509 | ld r8,-16(r6) /* section begin */ |
510 | ld r9,-8(r6) /* section end */ | 510 | ld r9,-8(r6) /* section end */ |
511 | subf. r9,r8,r9 | 511 | subf. r9,r8,r9 |
512 | beq 1b | 512 | beq 1b |
513 | /* write nops over the section of code */ | 513 | /* write nops over the section of code */ |
514 | /* todo: if large section, add a branch at the start of it */ | 514 | /* todo: if large section, add a branch at the start of it */ |
515 | srwi r9,r9,2 | 515 | srwi r9,r9,2 |
516 | mtctr r9 | 516 | mtctr r9 |
517 | sub r8,r8,r3 | 517 | sub r8,r8,r3 |
518 | lis r0,0x60000000@h /* nop */ | 518 | lis r0,0x60000000@h /* nop */ |
519 | 3: stw r0,0(r8) | 519 | 3: stw r0,0(r8) |
520 | andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l | 520 | andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l |
521 | beq 2f | 521 | beq 2f |
522 | dcbst 0,r8 /* suboptimal, but simpler */ | 522 | dcbst 0,r8 /* suboptimal, but simpler */ |
523 | sync | 523 | sync |
524 | icbi 0,r8 | 524 | icbi 0,r8 |
525 | 2: addi r8,r8,4 | 525 | 2: addi r8,r8,4 |
526 | bdnz 3b | 526 | bdnz 3b |
527 | sync /* additional sync needed on g4 */ | 527 | sync /* additional sync needed on g4 */ |
528 | isync | 528 | isync |
529 | b 1b | 529 | b 1b |
530 | 530 | ||
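In C terms, each 32-byte record walked above (note the addi r6,r6,32) looks roughly like this, and the walk nops out sections whose feature test fails (a sketch; the icache maintenance done in the asm is omitted):

    /* Hedged sketch of a fixup record and the walk in do_cpu_ftr_fixups. */
    struct ftr_fixup {
            unsigned long mask;     /* feature bits this entry tests */
            unsigned long value;    /* required value of those bits */
            unsigned int *start;    /* first instruction of the section */
            unsigned int *end;      /* one past the last instruction */
    };

    static void fixup_walk(unsigned long features,
                           struct ftr_fixup *f, struct ftr_fixup *stop)
    {
            unsigned int *p;

            for (; f < stop; f++)
                    if ((features & f->mask) != f->value)
                            for (p = f->start; p < f->end; p++)
                                    *p = 0x60000000;        /* ppc "nop" */
    }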
531 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) | 531 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) |
532 | /* | 532 | /* |
533 | * Do an IO access in real mode | 533 | * Do an IO access in real mode |
534 | */ | 534 | */ |
535 | _GLOBAL(real_readb) | 535 | _GLOBAL(real_readb) |
536 | mfmsr r7 | 536 | mfmsr r7 |
537 | ori r0,r7,MSR_DR | 537 | ori r0,r7,MSR_DR |
538 | xori r0,r0,MSR_DR | 538 | xori r0,r0,MSR_DR |
539 | sync | 539 | sync |
540 | mtmsrd r0 | 540 | mtmsrd r0 |
541 | sync | 541 | sync |
542 | isync | 542 | isync |
543 | mfspr r6,SPRN_HID4 | 543 | mfspr r6,SPRN_HID4 |
544 | rldicl r5,r6,32,0 | 544 | rldicl r5,r6,32,0 |
545 | ori r5,r5,0x100 | 545 | ori r5,r5,0x100 |
546 | rldicl r5,r5,32,0 | 546 | rldicl r5,r5,32,0 |
547 | sync | 547 | sync |
548 | mtspr SPRN_HID4,r5 | 548 | mtspr SPRN_HID4,r5 |
549 | isync | 549 | isync |
550 | slbia | 550 | slbia |
551 | isync | 551 | isync |
552 | lbz r3,0(r3) | 552 | lbz r3,0(r3) |
553 | sync | 553 | sync |
554 | mtspr SPRN_HID4,r6 | 554 | mtspr SPRN_HID4,r6 |
555 | isync | 555 | isync |
556 | slbia | 556 | slbia |
557 | isync | 557 | isync |
558 | mtmsrd r7 | 558 | mtmsrd r7 |
559 | sync | 559 | sync |
560 | isync | 560 | isync |
561 | blr | 561 | blr |
562 | 562 | ||
563 | /* | 563 | /* |
564 | * Do an IO access in real mode | 564 | * Do an IO access in real mode |
565 | */ | 565 | */ |
566 | _GLOBAL(real_writeb) | 566 | _GLOBAL(real_writeb) |
567 | mfmsr r7 | 567 | mfmsr r7 |
568 | ori r0,r7,MSR_DR | 568 | ori r0,r7,MSR_DR |
569 | xori r0,r0,MSR_DR | 569 | xori r0,r0,MSR_DR |
570 | sync | 570 | sync |
571 | mtmsrd r0 | 571 | mtmsrd r0 |
572 | sync | 572 | sync |
573 | isync | 573 | isync |
574 | mfspr r6,SPRN_HID4 | 574 | mfspr r6,SPRN_HID4 |
575 | rldicl r5,r6,32,0 | 575 | rldicl r5,r6,32,0 |
576 | ori r5,r5,0x100 | 576 | ori r5,r5,0x100 |
577 | rldicl r5,r5,32,0 | 577 | rldicl r5,r5,32,0 |
578 | sync | 578 | sync |
579 | mtspr SPRN_HID4,r5 | 579 | mtspr SPRN_HID4,r5 |
580 | isync | 580 | isync |
581 | slbia | 581 | slbia |
582 | isync | 582 | isync |
583 | stb r3,0(r4) | 583 | stb r3,0(r4) |
584 | sync | 584 | sync |
585 | mtspr SPRN_HID4,r6 | 585 | mtspr SPRN_HID4,r6 |
586 | isync | 586 | isync |
587 | slbia | 587 | slbia |
588 | isync | 588 | isync |
589 | mtmsrd r7 | 589 | mtmsrd r7 |
590 | sync | 590 | sync |
591 | isync | 591 | isync |
592 | blr | 592 | blr |
593 | #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ | 593 | #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ |
594 | 594 | ||
595 | /* | 595 | /* |
596 | * SCOM access functions for 970 (FX only for now) | 596 | * SCOM access functions for 970 (FX only for now) |
597 | * | 597 | * |
598 | * unsigned long scom970_read(unsigned int address); | 598 | * unsigned long scom970_read(unsigned int address); |
599 | * void scom970_write(unsigned int address, unsigned long value); | 599 | * void scom970_write(unsigned int address, unsigned long value); |
600 | * | 600 | * |
601 | * The address passed in is the 24-bit register address. This code | 601 | * The address passed in is the 24-bit register address. This code |
602 | * is 970-specific and does not check the status bits, so you should | 602 | * is 970-specific and does not check the status bits, so you should |
603 | * know what you are doing. | 603 | * know what you are doing. |
604 | */ | 604 | */ |
605 | _GLOBAL(scom970_read) | 605 | _GLOBAL(scom970_read) |
606 | /* interrupts off */ | 606 | /* interrupts off */ |
607 | mfmsr r4 | 607 | mfmsr r4 |
608 | ori r0,r4,MSR_EE | 608 | ori r0,r4,MSR_EE |
609 | xori r0,r0,MSR_EE | 609 | xori r0,r0,MSR_EE |
610 | mtmsrd r0,1 | 610 | mtmsrd r0,1 |
611 | 611 | ||
612 | /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits | 612 | /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits |
613 | * (including parity). On current CPUs they must be zeroed, | 613 | * (including parity). On current CPUs they must be zeroed, |
614 | * and finally OR in the RW bit | 614 | * and finally OR in the RW bit |
615 | */ | 615 | */ |
616 | rlwinm r3,r3,8,0,15 | 616 | rlwinm r3,r3,8,0,15 |
617 | ori r3,r3,0x8000 | 617 | ori r3,r3,0x8000 |
618 | 618 | ||
619 | /* do the actual scom read */ | 619 | /* do the actual scom read */ |
620 | sync | 620 | sync |
621 | mtspr SPRN_SCOMC,r3 | 621 | mtspr SPRN_SCOMC,r3 |
622 | isync | 622 | isync |
623 | mfspr r3,SPRN_SCOMD | 623 | mfspr r3,SPRN_SCOMD |
624 | isync | 624 | isync |
625 | mfspr r0,SPRN_SCOMC | 625 | mfspr r0,SPRN_SCOMC |
626 | isync | 626 | isync |
627 | 627 | ||
628 | /* XXX: fix up the result on some buggy 970s (ouch! we lost a bit, but | 628 | /* XXX: fix up the result on some buggy 970s (ouch! we lost a bit, but |
629 | * that's the best we can do). Not implemented yet as we don't use | 629 | * that's the best we can do). Not implemented yet as we don't use |
630 | * the SCOM on any of the bogus CPUs yet, but it may have to be done | 630 | * the SCOM on any of the bogus CPUs yet, but it may have to be done |
631 | * eventually. | 631 | * eventually. |
632 | */ | 632 | */ |
633 | 633 | ||
634 | /* restore interrupts */ | 634 | /* restore interrupts */ |
635 | mtmsrd r4,1 | 635 | mtmsrd r4,1 |
636 | blr | 636 | blr |
637 | 637 | ||
638 | 638 | ||
639 | _GLOBAL(scom970_write) | 639 | _GLOBAL(scom970_write) |
640 | /* interrupts off */ | 640 | /* interrupts off */ |
641 | mfmsr r5 | 641 | mfmsr r5 |
642 | ori r0,r5,MSR_EE | 642 | ori r0,r5,MSR_EE |
643 | xori r0,r0,MSR_EE | 643 | xori r0,r0,MSR_EE |
644 | mtmsrd r0,1 | 644 | mtmsrd r0,1 |
645 | 645 | ||
646 | /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits | 646 | /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits |
647 | * (including parity). On current CPUs they must be zeroed. | 647 | * (including parity). On current CPUs they must be zeroed. |
648 | */ | 648 | */ |
649 | 649 | ||
650 | rlwinm r3,r3,8,0,15 | 650 | rlwinm r3,r3,8,0,15 |
651 | 651 | ||
652 | sync | 652 | sync |
653 | mtspr SPRN_SCOMD,r4 /* write data */ | 653 | mtspr SPRN_SCOMD,r4 /* write data */ |
654 | isync | 654 | isync |
655 | mtspr SPRN_SCOMC,r3 /* write command */ | 655 | mtspr SPRN_SCOMC,r3 /* write command */ |
656 | isync | 656 | isync |
657 | mfspr r3,SPRN_SCOMC | 657 | mfspr r3,SPRN_SCOMC |
658 | isync | 658 | isync |
659 | 659 | ||
660 | /* restore interrupts */ | 660 | /* restore interrupts */ |
661 | mtmsrd r5,1 | 661 | mtmsrd r5,1 |
662 | blr | 662 | blr |
663 | 663 | ||
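In C terms, the rlwinm/ori pair above encodes the SCOMC command word roughly as follows (a sketch; 0x8000 is the read/RW bit mentioned in the comment, and the shift-and-mask equals the rotate only because the address fits in 24 bits):

    /* Hedged sketch of the SCOMC encoding used by scom970_read/write. */
    static inline u32 scom970_cmd(u32 addr, int read)
    {
            u32 cmd = (addr << 8) & 0xffff0000;     /* rlwinm r3,r3,8,0,15 */
            if (read)
                    cmd |= 0x8000;                  /* ori r3,r3,0x8000 */
            return cmd;
    }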
664 | 664 | ||
665 | /* | 665 | /* |
666 | * Create a kernel thread | 666 | * Create a kernel thread |
667 | * kernel_thread(fn, arg, flags) | 667 | * kernel_thread(fn, arg, flags) |
668 | */ | 668 | */ |
669 | _GLOBAL(kernel_thread) | 669 | _GLOBAL(kernel_thread) |
670 | std r29,-24(r1) | 670 | std r29,-24(r1) |
671 | std r30,-16(r1) | 671 | std r30,-16(r1) |
672 | stdu r1,-STACK_FRAME_OVERHEAD(r1) | 672 | stdu r1,-STACK_FRAME_OVERHEAD(r1) |
673 | mr r29,r3 | 673 | mr r29,r3 |
674 | mr r30,r4 | 674 | mr r30,r4 |
675 | ori r3,r5,CLONE_VM /* flags */ | 675 | ori r3,r5,CLONE_VM /* flags */ |
676 | oris r3,r3,(CLONE_UNTRACED>>16) | 676 | oris r3,r3,(CLONE_UNTRACED>>16) |
677 | li r4,0 /* new sp (unused) */ | 677 | li r4,0 /* new sp (unused) */ |
678 | li r0,__NR_clone | 678 | li r0,__NR_clone |
679 | sc | 679 | sc |
680 | cmpdi 0,r3,0 /* parent or child? */ | 680 | cmpdi 0,r3,0 /* parent or child? */ |
681 | bne 1f /* return if parent */ | 681 | bne 1f /* return if parent */ |
682 | li r0,0 | 682 | li r0,0 |
683 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | 683 | stdu r0,-STACK_FRAME_OVERHEAD(r1) |
684 | ld r2,8(r29) | 684 | ld r2,8(r29) |
685 | ld r29,0(r29) | 685 | ld r29,0(r29) |
686 | mtlr r29 /* fn addr in lr */ | 686 | mtlr r29 /* fn addr in lr */ |
687 | mr r3,r30 /* load arg and call fn */ | 687 | mr r3,r30 /* load arg and call fn */ |
688 | blrl | 688 | blrl |
689 | li r0,__NR_exit /* exit after child exits */ | 689 | li r0,__NR_exit /* exit after child exits */ |
690 | li r3,0 | 690 | li r3,0 |
691 | sc | 691 | sc |
692 | 1: addi r1,r1,STACK_FRAME_OVERHEAD | 692 | 1: addi r1,r1,STACK_FRAME_OVERHEAD |
693 | ld r29,-24(r1) | 693 | ld r29,-24(r1) |
694 | ld r30,-16(r1) | 694 | ld r30,-16(r1) |
695 | blr | 695 | blr |
696 | 696 | ||
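From C, the routine is used like this (a hedged sketch; the worker function is hypothetical, and CLONE_VM/CLONE_UNTRACED are ORed in by the asm above, so callers pass only extra flags):

    extern int kernel_thread(int (*fn)(void *), void *arg,
                             unsigned long flags);

    static int worker(void *arg)
    {
            return 0;       /* returning makes the child exit via __NR_exit */
    }

    static int spawn_worker(void)
    {
            return kernel_thread(worker, NULL, 0);  /* child pid in parent */
    }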
697 | /* | 697 | /* |
698 | * disable_kernel_fp() | 698 | * disable_kernel_fp() |
699 | * Disable the FPU. | 699 | * Disable the FPU. |
700 | */ | 700 | */ |
701 | _GLOBAL(disable_kernel_fp) | 701 | _GLOBAL(disable_kernel_fp) |
702 | mfmsr r3 | 702 | mfmsr r3 |
703 | rldicl r0,r3,(63-MSR_FP_LG),1 | 703 | rldicl r0,r3,(63-MSR_FP_LG),1 |
704 | rldicl r3,r0,(MSR_FP_LG+1),0 | 704 | rldicl r3,r0,(MSR_FP_LG+1),0 |
705 | mtmsrd r3 /* disable use of fpu now */ | 705 | mtmsrd r3 /* disable use of fpu now */ |
706 | isync | 706 | isync |
707 | blr | 707 | blr |
708 | 708 | ||
709 | #ifdef CONFIG_ALTIVEC | 709 | #ifdef CONFIG_ALTIVEC |
710 | 710 | ||
711 | #if 0 /* this has no callers for now */ | 711 | #if 0 /* this has no callers for now */ |
712 | /* | 712 | /* |
713 | * disable_kernel_altivec() | 713 | * disable_kernel_altivec() |
714 | * Disable the VMX. | 714 | * Disable the VMX. |
715 | */ | 715 | */ |
716 | _GLOBAL(disable_kernel_altivec) | 716 | _GLOBAL(disable_kernel_altivec) |
717 | mfmsr r3 | 717 | mfmsr r3 |
718 | rldicl r0,r3,(63-MSR_VEC_LG),1 | 718 | rldicl r0,r3,(63-MSR_VEC_LG),1 |
719 | rldicl r3,r0,(MSR_VEC_LG+1),0 | 719 | rldicl r3,r0,(MSR_VEC_LG+1),0 |
720 | mtmsrd r3 /* disable use of VMX now */ | 720 | mtmsrd r3 /* disable use of VMX now */ |
721 | isync | 721 | isync |
722 | blr | 722 | blr |
723 | #endif /* 0 */ | 723 | #endif /* 0 */ |
724 | 724 | ||
725 | /* | 725 | /* |
726 | * giveup_altivec(tsk) | 726 | * giveup_altivec(tsk) |
727 | * Disable VMX for the task given as the argument, | 727 | * Disable VMX for the task given as the argument, |
728 | * and save the vector registers in its thread_struct. | 728 | * and save the vector registers in its thread_struct. |
729 | * Enables the VMX for use in the kernel on return. | 729 | * Enables the VMX for use in the kernel on return. |
730 | */ | 730 | */ |
731 | _GLOBAL(giveup_altivec) | 731 | _GLOBAL(giveup_altivec) |
732 | mfmsr r5 | 732 | mfmsr r5 |
733 | oris r5,r5,MSR_VEC@h | 733 | oris r5,r5,MSR_VEC@h |
734 | mtmsrd r5 /* enable use of VMX now */ | 734 | mtmsrd r5 /* enable use of VMX now */ |
735 | isync | 735 | isync |
736 | cmpdi 0,r3,0 | 736 | cmpdi 0,r3,0 |
737 | beqlr- /* if no previous owner, done */ | 737 | beqlr- /* if no previous owner, done */ |
738 | addi r3,r3,THREAD /* want THREAD of task */ | 738 | addi r3,r3,THREAD /* want THREAD of task */ |
739 | ld r5,PT_REGS(r3) | 739 | ld r5,PT_REGS(r3) |
740 | cmpdi 0,r5,0 | 740 | cmpdi 0,r5,0 |
741 | SAVE_32VRS(0,r4,r3) | 741 | SAVE_32VRS(0,r4,r3) |
742 | mfvscr vr0 | 742 | mfvscr vr0 |
743 | li r4,THREAD_VSCR | 743 | li r4,THREAD_VSCR |
744 | stvx vr0,r4,r3 | 744 | stvx vr0,r4,r3 |
745 | beq 1f | 745 | beq 1f |
746 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | 746 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
747 | lis r3,MSR_VEC@h | 747 | lis r3,MSR_VEC@h |
748 | andc r4,r4,r3 /* disable VMX for previous task */ | 748 | andc r4,r4,r3 /* disable VMX for previous task */ |
749 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | 749 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
750 | 1: | 750 | 1: |
751 | #ifndef CONFIG_SMP | 751 | #ifndef CONFIG_SMP |
752 | li r5,0 | 752 | li r5,0 |
753 | ld r4,last_task_used_altivec@got(r2) | 753 | ld r4,last_task_used_altivec@got(r2) |
754 | std r5,0(r4) | 754 | std r5,0(r4) |
755 | #endif /* CONFIG_SMP */ | 755 | #endif /* CONFIG_SMP */ |
756 | blr | 756 | blr |
757 | 757 | ||
758 | #endif /* CONFIG_ALTIVEC */ | 758 | #endif /* CONFIG_ALTIVEC */ |
759 | 759 | ||
760 | _GLOBAL(__setup_cpu_power3) | 760 | _GLOBAL(__setup_cpu_power3) |
761 | blr | 761 | blr |
762 | 762 | ||
763 | _GLOBAL(execve) | 763 | _GLOBAL(execve) |
764 | li r0,__NR_execve | 764 | li r0,__NR_execve |
765 | sc | 765 | sc |
766 | bnslr | 766 | bnslr |
767 | neg r3,r3 | 767 | neg r3,r3 |
768 | blr | 768 | blr |
769 | 769 | ||
770 | /* kexec_wait(phys_cpu) | 770 | /* kexec_wait(phys_cpu) |
771 | * | 771 | * |
772 | * wait for the flag to change, indicating this kernel is going away but | 772 | * wait for the flag to change, indicating this kernel is going away but |
773 | * the slave code for the next one is at addresses 0 to 0x100. | 773 | * the slave code for the next one is at addresses 0 to 0x100. |
774 | * | 774 | * |
775 | * This is used by all slaves. | 775 | * This is used by all slaves. |
776 | * | 776 | * |
777 | * Physical (hardware) cpu id should be in r3. | 777 | * Physical (hardware) cpu id should be in r3. |
778 | */ | 778 | */ |
779 | _GLOBAL(kexec_wait) | 779 | _GLOBAL(kexec_wait) |
780 | bl 1f | 780 | bl 1f |
781 | 1: mflr r5 | 781 | 1: mflr r5 |
782 | addi r5,r5,kexec_flag-1b | 782 | addi r5,r5,kexec_flag-1b |
783 | 783 | ||
784 | 99: HMT_LOW | 784 | 99: HMT_LOW |
785 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ | 785 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ |
786 | lwz r4,0(r5) | 786 | lwz r4,0(r5) |
787 | cmpwi 0,r4,0 | 787 | cmpwi 0,r4,0 |
788 | bnea 0x60 | 788 | bnea 0x60 |
789 | #endif | 789 | #endif |
790 | b 99b | 790 | b 99b |
791 | 791 | ||
792 | /* this can be in text because we won't change it until we are | 792 | /* this can be in text because we won't change it until we are |
793 | * running in real mode anyway | 793 | * running in real mode anyway |
794 | */ | 794 | */ |
795 | kexec_flag: | 795 | kexec_flag: |
796 | .long 0 | 796 | .long 0 |
797 | 797 | ||
798 | 798 | ||
799 | #ifdef CONFIG_KEXEC | 799 | #ifdef CONFIG_KEXEC |
800 | 800 | ||
801 | /* kexec_smp_wait(void) | 801 | /* kexec_smp_wait(void) |
802 | * | 802 | * |
803 | * call with interrupts off | 803 | * call with interrupts off |
804 | * note: this is a terminal routine, it does not save lr | 804 | * note: this is a terminal routine, it does not save lr |
805 | * | 805 | * |
806 | * get phys id from paca | 806 | * get phys id from paca |
807 | * set paca id to -1 to say we got here | 807 | * set paca id to -1 to say we got here |
808 | * switch to real mode | 808 | * switch to real mode |
809 | * join other cpus in kexec_wait(phys_id) | 809 | * join other cpus in kexec_wait(phys_id) |
810 | */ | 810 | */ |
811 | _GLOBAL(kexec_smp_wait) | 811 | _GLOBAL(kexec_smp_wait) |
812 | lhz r3,PACAHWCPUID(r13) | 812 | lhz r3,PACAHWCPUID(r13) |
813 | li r4,-1 | 813 | li r4,-1 |
814 | sth r4,PACAHWCPUID(r13) /* let others know we left */ | 814 | sth r4,PACAHWCPUID(r13) /* let others know we left */ |
815 | bl real_mode | 815 | bl real_mode |
816 | b .kexec_wait | 816 | b .kexec_wait |
817 | 817 | ||
818 | /* | 818 | /* |
819 | * switch to real mode (turn mmu off) | 819 | * switch to real mode (turn mmu off) |
820 | * we use the early kernel trick that the hardware ignores bits | 820 | * we use the early kernel trick that the hardware ignores bits |
821 | * 0 and 1 (big endian) of the effective address in real mode | 821 | * 0 and 1 (big endian) of the effective address in real mode |
822 | * | 822 | * |
823 | * don't overwrite r3 here, it is live for kexec_wait above. | 823 | * don't overwrite r3 here, it is live for kexec_wait above. |
824 | */ | 824 | */ |
825 | real_mode: /* assume normal blr return */ | 825 | real_mode: /* assume normal blr return */ |
826 | 1: li r9,MSR_RI | 826 | 1: li r9,MSR_RI |
827 | li r10,MSR_DR|MSR_IR | 827 | li r10,MSR_DR|MSR_IR |
828 | mflr r11 /* return address to SRR0 */ | 828 | mflr r11 /* return address to SRR0 */ |
829 | mfmsr r12 | 829 | mfmsr r12 |
830 | andc r9,r12,r9 | 830 | andc r9,r12,r9 |
831 | andc r10,r12,r10 | 831 | andc r10,r12,r10 |
832 | 832 | ||
833 | mtmsrd r9,1 | 833 | mtmsrd r9,1 |
834 | mtspr SPRN_SRR1,r10 | 834 | mtspr SPRN_SRR1,r10 |
835 | mtspr SPRN_SRR0,r11 | 835 | mtspr SPRN_SRR0,r11 |
836 | rfid | 836 | rfid |
837 | 837 | ||
838 | 838 | ||
839 | /* | 839 | /* |
840 | * kexec_sequence(newstack, start, image, control, clear_all()) | 840 | * kexec_sequence(newstack, start, image, control, clear_all()) |
841 | * | 841 | * |
842 | * does the grungy work with stack switching and real mode switches | 842 | * does the grungy work with stack switching and real mode switches |
843 | * also does simple calls to other code | 843 | * also does simple calls to other code |
844 | */ | 844 | */ |
845 | 845 | ||
846 | _GLOBAL(kexec_sequence) | 846 | _GLOBAL(kexec_sequence) |
847 | mflr r0 | 847 | mflr r0 |
848 | std r0,16(r1) | 848 | std r0,16(r1) |
849 | 849 | ||
850 | /* switch stacks to newstack -- &kexec_stack.stack */ | 850 | /* switch stacks to newstack -- &kexec_stack.stack */ |
851 | stdu r1,THREAD_SIZE-112(r3) | 851 | stdu r1,THREAD_SIZE-112(r3) |
852 | mr r1,r3 | 852 | mr r1,r3 |
853 | 853 | ||
854 | li r0,0 | 854 | li r0,0 |
855 | std r0,16(r1) | 855 | std r0,16(r1) |
856 | 856 | ||
857 | /* save regs for local vars on new stack. | 857 | /* save regs for local vars on new stack. |
858 | * yes, we won't go back, but ... | 858 | * yes, we won't go back, but ... |
859 | */ | 859 | */ |
860 | std r31,-8(r1) | 860 | std r31,-8(r1) |
861 | std r30,-16(r1) | 861 | std r30,-16(r1) |
862 | std r29,-24(r1) | 862 | std r29,-24(r1) |
863 | std r28,-32(r1) | 863 | std r28,-32(r1) |
864 | std r27,-40(r1) | 864 | std r27,-40(r1) |
865 | std r26,-48(r1) | 865 | std r26,-48(r1) |
866 | std r25,-56(r1) | 866 | std r25,-56(r1) |
867 | 867 | ||
868 | stdu r1,-112-64(r1) | 868 | stdu r1,-112-64(r1) |
869 | 869 | ||
870 | /* save args into preserved regs */ | 870 | /* save args into preserved regs */ |
871 | mr r31,r3 /* newstack (both) */ | 871 | mr r31,r3 /* newstack (both) */ |
872 | mr r30,r4 /* start (real) */ | 872 | mr r30,r4 /* start (real) */ |
873 | mr r29,r5 /* image (virt) */ | 873 | mr r29,r5 /* image (virt) */ |
874 | mr r28,r6 /* control, unused */ | 874 | mr r28,r6 /* control, unused */ |
875 | mr r27,r7 /* clear_all() fn desc */ | 875 | mr r27,r7 /* clear_all() fn desc */ |
876 | mr r26,r8 /* spare */ | 876 | mr r26,r8 /* spare */ |
877 | lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ | 877 | lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ |
878 | 878 | ||
879 | /* disable interrupts, we are overwriting kernel data next */ | 879 | /* disable interrupts, we are overwriting kernel data next */ |
880 | mfmsr r3 | 880 | mfmsr r3 |
881 | rlwinm r3,r3,0,17,15 | 881 | rlwinm r3,r3,0,17,15 |
882 | mtmsrd r3,1 | 882 | mtmsrd r3,1 |
883 | 883 | ||
884 | /* copy dest pages, flush whole dest image */ | 884 | /* copy dest pages, flush whole dest image */ |
885 | mr r3,r29 | 885 | mr r3,r29 |
886 | bl .kexec_copy_flush /* (image) */ | 886 | bl .kexec_copy_flush /* (image) */ |
887 | 887 | ||
888 | /* turn off mmu */ | 888 | /* turn off mmu */ |
889 | bl real_mode | 889 | bl real_mode |
890 | 890 | ||
891 | /* clear out hardware hash page table and tlb */ | 891 | /* clear out hardware hash page table and tlb */ |
892 | ld r5,0(r27) /* deref function descriptor */ | 892 | ld r5,0(r27) /* deref function descriptor */ |
893 | mtctr r5 | 893 | mtctr r5 |
894 | bctrl /* ppc_md.hash_clear_all(void); */ | 894 | bctrl /* ppc_md.hash_clear_all(void); */ |
895 | 895 | ||
896 | /* | 896 | /* |
897 | * kexec image calling is: | 897 | * kexec image calling is: |
898 | * the first 0x100 bytes of the entry point are copied to 0 | 898 | * the first 0x100 bytes of the entry point are copied to 0 |
899 | * | 899 | * |
900 | * all slaves branch to slave = 0x60 (absolute) | 900 | * all slaves branch to slave = 0x60 (absolute) |
901 | * slave(phys_cpu_id); | 901 | * slave(phys_cpu_id); |
902 | * | 902 | * |
903 | * master goes to start = entry point | 903 | * master goes to start = entry point |
904 | * start(phys_cpu_id, start, 0); | 904 | * start(phys_cpu_id, start, 0); |
905 | * | 905 | * |
906 | * | 906 | * |
907 | * a wrapper is needed to call existing kernels; here is an approximate | 907 | * a wrapper is needed to call existing kernels; here is an approximate |
908 | * description of one method: | 908 | * description of one method: |
909 | * | 909 | * |
910 | * v2: (2.6.10) | 910 | * v2: (2.6.10) |
911 | * start will be near the boot_block (maybe 0x100 bytes before it?) | 911 | * start will be near the boot_block (maybe 0x100 bytes before it?) |
912 | * it will have a 0x60, which will b to boot_block, where it will wait | 912 | * it will have a 0x60, which will b to boot_block, where it will wait |
913 | * and 0 will store phys into struct boot-block and load r3 from there, | 913 | * and 0 will store phys into struct boot-block and load r3 from there, |
914 | * copy kernel 0-0x100 and tell slaves to back down to 0x60 again | 914 | * copy kernel 0-0x100 and tell slaves to back down to 0x60 again |
915 | * | 915 | * |
916 | * v1: (2.6.9) | 916 | * v1: (2.6.9) |
917 | * boot block will have all cpus scanning device tree to see if they | 917 | * boot block will have all cpus scanning device tree to see if they |
918 | * are the boot cpu ????? | 918 | * are the boot cpu ????? |
919 | * other device tree differences (prop sizes, va vs pa, etc)... | 919 | * other device tree differences (prop sizes, va vs pa, etc)... |
920 | */ | 920 | */ |
921 | 921 | ||
922 | /* copy 0x100 bytes starting at start to 0 */ | 922 | /* copy 0x100 bytes starting at start to 0 */ |
923 | li r3,0 | 923 | li r3,0 |
924 | mr r4,r30 | 924 | mr r4,r30 |
925 | li r5,0x100 | 925 | li r5,0x100 |
926 | li r6,0 | 926 | li r6,0 |
927 | bl .copy_and_flush /* (dest, src, copy limit, start offset) */ | 927 | bl .copy_and_flush /* (dest, src, copy limit, start offset) */ |
928 | 1: /* assume normal blr return */ | 928 | 1: /* assume normal blr return */ |
929 | 929 | ||
930 | /* release the other cpus to the new kernel; secondaries start at 0x60 */ | 930 | /* release the other cpus to the new kernel; secondaries start at 0x60 */ |
931 | mflr r5 | 931 | mflr r5 |
932 | li r6,1 | 932 | li r6,1 |
933 | stw r6,kexec_flag-1b(r5) | 933 | stw r6,kexec_flag-1b(r5) |
934 | mr r3,r25 # my phys cpu | 934 | mr r3,r25 # my phys cpu |
935 | mr r4,r30 # start, aka phys mem offset | 935 | mr r4,r30 # start, aka phys mem offset |
936 | mtlr r4 | 936 | mtlr r4 |
937 | li r5,0 | 937 | li r5,0 |
938 | blr /* image->start(physid, image->start, 0); */ | 938 | blr /* image->start(physid, image->start, 0); */ |
939 | #endif /* CONFIG_KEXEC */ | 939 | #endif /* CONFIG_KEXEC */ |
940 | 940 |
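Restated in C, the handoff described in the comment block above looks roughly like this (a sketch; the typedef and helper are illustrative only, and the cast glosses over ppc64 function descriptors, which the asm bypasses with mtlr/blr):

    typedef void (*kexec_start_t)(unsigned long phys_cpu_id,
                                  unsigned long start, unsigned long zero);

    static void handoff(unsigned long start, unsigned long phys_cpu)
    {
            /* master: call the new image's entry point; the slaves are
             * meanwhile released via kexec_flag and branch to absolute
             * 0x60 with their phys cpu id in r3. */
            ((kexec_start_t)start)(phys_cpu, start, 0);
    }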
include/asm-powerpc/hw_irq.h
1 | /* | 1 | /* |
2 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | 2 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> |
3 | */ | 3 | */ |
4 | #ifndef _ASM_POWERPC_HW_IRQ_H | 4 | #ifndef _ASM_POWERPC_HW_IRQ_H |
5 | #define _ASM_POWERPC_HW_IRQ_H | 5 | #define _ASM_POWERPC_HW_IRQ_H |
6 | 6 | ||
7 | #ifdef __KERNEL__ | 7 | #ifdef __KERNEL__ |
8 | 8 | ||
9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <asm/ptrace.h> | 11 | #include <asm/ptrace.h> |
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | 13 | ||
14 | extern void timer_interrupt(struct pt_regs *); | 14 | extern void timer_interrupt(struct pt_regs *); |
15 | extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq); | ||
16 | 15 | ||
17 | #ifdef CONFIG_PPC_ISERIES | 16 | #ifdef CONFIG_PPC_ISERIES |
18 | 17 | ||
19 | extern unsigned long local_get_flags(void); | 18 | extern unsigned long local_get_flags(void); |
20 | extern unsigned long local_irq_disable(void); | 19 | extern unsigned long local_irq_disable(void); |
21 | extern void local_irq_restore(unsigned long); | 20 | extern void local_irq_restore(unsigned long); |
22 | 21 | ||
23 | #define local_irq_enable() local_irq_restore(1) | 22 | #define local_irq_enable() local_irq_restore(1) |
24 | #define local_save_flags(flags) ((flags) = local_get_flags()) | 23 | #define local_save_flags(flags) ((flags) = local_get_flags()) |
25 | #define local_irq_save(flags) ((flags) = local_irq_disable()) | 24 | #define local_irq_save(flags) ((flags) = local_irq_disable()) |
26 | 25 | ||
27 | #define irqs_disabled() (local_get_flags() == 0) | 26 | #define irqs_disabled() (local_get_flags() == 0) |
28 | 27 | ||
29 | #else | 28 | #else |
30 | 29 | ||
31 | #if defined(CONFIG_BOOKE) | 30 | #if defined(CONFIG_BOOKE) |
32 | #define SET_MSR_EE(x) mtmsr(x) | 31 | #define SET_MSR_EE(x) mtmsr(x) |
33 | #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") | 32 | #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") |
34 | #elif defined(__powerpc64__) | 33 | #elif defined(__powerpc64__) |
35 | #define SET_MSR_EE(x) __mtmsrd(x, 1) | 34 | #define SET_MSR_EE(x) __mtmsrd(x, 1) |
36 | #define local_irq_restore(flags) do { \ | 35 | #define local_irq_restore(flags) do { \ |
37 | __asm__ __volatile__("": : :"memory"); \ | 36 | __asm__ __volatile__("": : :"memory"); \ |
38 | __mtmsrd((flags), 1); \ | 37 | __mtmsrd((flags), 1); \ |
39 | } while(0) | 38 | } while(0) |
40 | #else | 39 | #else |
41 | #define SET_MSR_EE(x) mtmsr(x) | 40 | #define SET_MSR_EE(x) mtmsr(x) |
42 | #define local_irq_restore(flags) mtmsr(flags) | 41 | #define local_irq_restore(flags) mtmsr(flags) |
43 | #endif | 42 | #endif |
44 | 43 | ||
45 | static inline void local_irq_disable(void) | 44 | static inline void local_irq_disable(void) |
46 | { | 45 | { |
47 | #ifdef CONFIG_BOOKE | 46 | #ifdef CONFIG_BOOKE |
48 | __asm__ __volatile__("wrteei 0": : :"memory"); | 47 | __asm__ __volatile__("wrteei 0": : :"memory"); |
49 | #else | 48 | #else |
50 | unsigned long msr; | 49 | unsigned long msr; |
51 | __asm__ __volatile__("": : :"memory"); | 50 | __asm__ __volatile__("": : :"memory"); |
52 | msr = mfmsr(); | 51 | msr = mfmsr(); |
53 | SET_MSR_EE(msr & ~MSR_EE); | 52 | SET_MSR_EE(msr & ~MSR_EE); |
54 | #endif | 53 | #endif |
55 | } | 54 | } |
56 | 55 | ||
57 | static inline void local_irq_enable(void) | 56 | static inline void local_irq_enable(void) |
58 | { | 57 | { |
59 | #ifdef CONFIG_BOOKE | 58 | #ifdef CONFIG_BOOKE |
60 | __asm__ __volatile__("wrteei 1": : :"memory"); | 59 | __asm__ __volatile__("wrteei 1": : :"memory"); |
61 | #else | 60 | #else |
62 | unsigned long msr; | 61 | unsigned long msr; |
63 | __asm__ __volatile__("": : :"memory"); | 62 | __asm__ __volatile__("": : :"memory"); |
64 | msr = mfmsr(); | 63 | msr = mfmsr(); |
65 | SET_MSR_EE(msr | MSR_EE); | 64 | SET_MSR_EE(msr | MSR_EE); |
66 | #endif | 65 | #endif |
67 | } | 66 | } |
68 | 67 | ||
69 | static inline void local_irq_save_ptr(unsigned long *flags) | 68 | static inline void local_irq_save_ptr(unsigned long *flags) |
70 | { | 69 | { |
71 | unsigned long msr; | 70 | unsigned long msr; |
72 | msr = mfmsr(); | 71 | msr = mfmsr(); |
73 | *flags = msr; | 72 | *flags = msr; |
74 | #ifdef CONFIG_BOOKE | 73 | #ifdef CONFIG_BOOKE |
75 | __asm__ __volatile__("wrteei 0": : :"memory"); | 74 | __asm__ __volatile__("wrteei 0": : :"memory"); |
76 | #else | 75 | #else |
77 | SET_MSR_EE(msr & ~MSR_EE); | 76 | SET_MSR_EE(msr & ~MSR_EE); |
78 | #endif | 77 | #endif |
79 | __asm__ __volatile__("": : :"memory"); | 78 | __asm__ __volatile__("": : :"memory"); |
80 | } | 79 | } |
81 | 80 | ||
82 | #define local_save_flags(flags) ((flags) = mfmsr()) | 81 | #define local_save_flags(flags) ((flags) = mfmsr()) |
83 | #define local_irq_save(flags) local_irq_save_ptr(&flags) | 82 | #define local_irq_save(flags) local_irq_save_ptr(&flags) |
84 | #define irqs_disabled() ((mfmsr() & MSR_EE) == 0) | 83 | #define irqs_disabled() ((mfmsr() & MSR_EE) == 0) |
85 | 84 | ||
86 | #endif /* CONFIG_PPC_ISERIES */ | 85 | #endif /* CONFIG_PPC_ISERIES */ |
87 | 86 | ||
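A typical (hedged) use of these primitives, valid for both the iSeries and non-iSeries definitions above:

    /* Hypothetical critical section against local interrupts. */
    static void touch_shared_state(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* disable, remembering old state */
            /* ... touch state shared with interrupt context ... */
            local_irq_restore(flags);       /* restore MSR[EE] as it was */
    }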
88 | #define mask_irq(irq) \ | 87 | #define mask_irq(irq) \ |
89 | ({ \ | 88 | ({ \ |
90 | irq_desc_t *desc = get_irq_desc(irq); \ | 89 | irq_desc_t *desc = get_irq_desc(irq); \ |
91 | if (desc->handler && desc->handler->disable) \ | 90 | if (desc->handler && desc->handler->disable) \ |
92 | desc->handler->disable(irq); \ | 91 | desc->handler->disable(irq); \ |
93 | }) | 92 | }) |
94 | #define unmask_irq(irq) \ | 93 | #define unmask_irq(irq) \ |
95 | ({ \ | 94 | ({ \ |
96 | irq_desc_t *desc = get_irq_desc(irq); \ | 95 | irq_desc_t *desc = get_irq_desc(irq); \ |
97 | if (desc->handler && desc->handler->enable) \ | 96 | if (desc->handler && desc->handler->enable) \ |
98 | desc->handler->enable(irq); \ | 97 | desc->handler->enable(irq); \ |
99 | }) | 98 | }) |
100 | #define ack_irq(irq) \ | 99 | #define ack_irq(irq) \ |
101 | ({ \ | 100 | ({ \ |
102 | irq_desc_t *desc = get_irq_desc(irq); \ | 101 | irq_desc_t *desc = get_irq_desc(irq); \ |
103 | if (desc->handler && desc->handler->ack) \ | 102 | if (desc->handler && desc->handler->ack) \ |
104 | desc->handler->ack(irq); \ | 103 | desc->handler->ack(irq); \ |
105 | }) | 104 | }) |
106 | 105 | ||
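These macros poke individual hw_interrupt_type hooks; the dispatch itself is now the generic __do_IRQ(), which (as the commit message notes) assumes every controller also provides an end() routine. A hedged sketch of that flow:

    /* Sketch only: the real sequence lives in kernel/irq/handle.c. */
    static void do_irq_sketch(unsigned int irq)
    {
            irq_desc_t *desc = get_irq_desc(irq);

            desc->handler->ack(irq);
            /* ... invoke the registered action handlers ... */
            desc->handler->end(irq);        /* must now exist for every PIC */
    }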
107 | /* Should we handle this via lost interrupts and IPIs, or should we not care, | 106 | /* Should we handle this via lost interrupts and IPIs, or should we not care, |
108 | * like we do now? --BenH. | 107 | * like we do now? --BenH. |
109 | */ | 108 | */ |
110 | struct hw_interrupt_type; | 109 | struct hw_interrupt_type; |
111 | static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} | 110 | static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} |
112 | 111 | ||
113 | #endif /* __KERNEL__ */ | 112 | #endif /* __KERNEL__ */ |
114 | #endif /* _ASM_POWERPC_HW_IRQ_H */ | 113 | #endif /* _ASM_POWERPC_HW_IRQ_H */ |
115 | 114 |
include/asm-powerpc/irq.h
1 | #ifdef __KERNEL__ | 1 | #ifdef __KERNEL__ |
2 | #ifndef _ASM_POWERPC_IRQ_H | 2 | #ifndef _ASM_POWERPC_IRQ_H |
3 | #define _ASM_POWERPC_IRQ_H | 3 | #define _ASM_POWERPC_IRQ_H |
4 | 4 | ||
5 | /* | 5 | /* |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | 14 | ||
15 | #include <asm/types.h> | 15 | #include <asm/types.h> |
16 | #include <asm/atomic.h> | 16 | #include <asm/atomic.h> |
17 | 17 | ||
18 | /* this number is used when no interrupt has been assigned */ | 18 | /* this number is used when no interrupt has been assigned */ |
19 | #define NO_IRQ (-1) | 19 | #define NO_IRQ (-1) |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * These constants are used for passing information about interrupt | 22 | * These constants are used for passing information about interrupt |
23 | * signal polarity and level/edge sensing to the low-level PIC chip | 23 | * signal polarity and level/edge sensing to the low-level PIC chip |
24 | * drivers. | 24 | * drivers. |
25 | */ | 25 | */ |
26 | #define IRQ_SENSE_MASK 0x1 | 26 | #define IRQ_SENSE_MASK 0x1 |
27 | #define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */ | 27 | #define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */ |
28 | #define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */ | 28 | #define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */ |
29 | 29 | ||
30 | #define IRQ_POLARITY_MASK 0x2 | 30 | #define IRQ_POLARITY_MASK 0x2 |
31 | #define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ | 31 | #define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ |
32 | #define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ | 32 | #define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * The IRQ line status macro IRQ_PER_CPU is used on this architecture. | 35 | * The IRQ line status macro IRQ_PER_CPU is used on this architecture. |
36 | */ | 36 | */ |
37 | #define ARCH_HAS_IRQ_PER_CPU | 37 | #define ARCH_HAS_IRQ_PER_CPU |
38 | 38 | ||
39 | #define get_irq_desc(irq) (&irq_desc[(irq)]) | 39 | #define get_irq_desc(irq) (&irq_desc[(irq)]) |
40 | 40 | ||
41 | /* Define a way to iterate across irqs. */ | 41 | /* Define a way to iterate across irqs. */ |
42 | #define for_each_irq(i) \ | 42 | #define for_each_irq(i) \ |
43 | for ((i) = 0; (i) < NR_IRQS; ++(i)) | 43 | for ((i) = 0; (i) < NR_IRQS; ++(i)) |
44 | 44 | ||
45 | #ifdef CONFIG_PPC64 | 45 | #ifdef CONFIG_PPC64 |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Maximum number of interrupt sources that we can handle. | 48 | * Maximum number of interrupt sources that we can handle. |
49 | */ | 49 | */ |
50 | #define NR_IRQS 512 | 50 | #define NR_IRQS 512 |
51 | 51 | ||
52 | /* Interrupt numbers are virtual in case they are sparsely | 52 | /* Interrupt numbers are virtual in case they are sparsely |
53 | * distributed by the hardware. | 53 | * distributed by the hardware. |
54 | */ | 54 | */ |
55 | extern unsigned int virt_irq_to_real_map[NR_IRQS]; | 55 | extern unsigned int virt_irq_to_real_map[NR_IRQS]; |
56 | 56 | ||
57 | /* Create a mapping for a real_irq if it doesn't already exist. | 57 | /* Create a mapping for a real_irq if it doesn't already exist. |
58 | * Return the virtual irq as a convenience. | 58 | * Return the virtual irq as a convenience. |
59 | */ | 59 | */ |
60 | int virt_irq_create_mapping(unsigned int real_irq); | 60 | int virt_irq_create_mapping(unsigned int real_irq); |
61 | void virt_irq_init(void); | 61 | void virt_irq_init(void); |
62 | 62 | ||
63 | static inline unsigned int virt_irq_to_real(unsigned int virt_irq) | 63 | static inline unsigned int virt_irq_to_real(unsigned int virt_irq) |
64 | { | 64 | { |
65 | return virt_irq_to_real_map[virt_irq]; | 65 | return virt_irq_to_real_map[virt_irq]; |
66 | } | 66 | } |
67 | 67 | ||
68 | extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); | 68 | extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); |
69 | 69 | ||
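A hedged sketch of how a PIC driver uses this mapping (the function below is hypothetical):

    /* Hypothetical: register a hardware source and recover it later. */
    static int register_source(unsigned int real_irq)
    {
            int virq = virt_irq_create_mapping(real_irq);

            /* on dispatch, the driver goes the other way:
             *   unsigned int real = virt_irq_to_real(virq);
             * or, for sparse sources, real_irq_to_virt_slowpath(). */
            return virq;
    }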
70 | /* | 70 | /* |
71 | * List of interrupt controllers. | 71 | * List of interrupt controllers. |
72 | */ | 72 | */ |
73 | #define IC_INVALID 0 | 73 | #define IC_INVALID 0 |
74 | #define IC_OPEN_PIC 1 | 74 | #define IC_OPEN_PIC 1 |
75 | #define IC_PPC_XIC 2 | 75 | #define IC_PPC_XIC 2 |
76 | #define IC_CELL_PIC 3 | 76 | #define IC_CELL_PIC 3 |
77 | #define IC_ISERIES 4 | 77 | #define IC_ISERIES 4 |
78 | 78 | ||
79 | extern u64 ppc64_interrupt_controller; | 79 | extern u64 ppc64_interrupt_controller; |
80 | 80 | ||
81 | #else /* 32-bit */ | 81 | #else /* 32-bit */ |
82 | 82 | ||
83 | #if defined(CONFIG_40x) | 83 | #if defined(CONFIG_40x) |
84 | #include <asm/ibm4xx.h> | 84 | #include <asm/ibm4xx.h> |
85 | 85 | ||
86 | #ifndef NR_BOARD_IRQS | 86 | #ifndef NR_BOARD_IRQS |
87 | #define NR_BOARD_IRQS 0 | 87 | #define NR_BOARD_IRQS 0 |
88 | #endif | 88 | #endif |
89 | 89 | ||
90 | #ifndef UIC_WIDTH /* Number of interrupts per device */ | 90 | #ifndef UIC_WIDTH /* Number of interrupts per device */ |
91 | #define UIC_WIDTH 32 | 91 | #define UIC_WIDTH 32 |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | #ifndef NR_UICS /* number of UIC devices */ | 94 | #ifndef NR_UICS /* number of UIC devices */ |
95 | #define NR_UICS 1 | 95 | #define NR_UICS 1 |
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | #if defined (CONFIG_403) | 98 | #if defined (CONFIG_403) |
99 | /* | 99 | /* |
100 | * The PowerPC 403 cores' Asynchronous Interrupt Controller (AIC) has | 100 | * The PowerPC 403 cores' Asynchronous Interrupt Controller (AIC) has |
101 | * 32 possible interrupts, a majority of which are not implemented on | 101 | * 32 possible interrupts, a majority of which are not implemented on |
102 | * all cores. There are six configurable external interrupt pins and | 102 | * all cores. There are six configurable external interrupt pins and |
103 | * there are eight internal interrupts for the on-chip serial port | 103 | * there are eight internal interrupts for the on-chip serial port |
104 | * (SPU), DMA controller, and JTAG controller. | 104 | * (SPU), DMA controller, and JTAG controller. |
105 | * | 105 | * |
106 | */ | 106 | */ |
107 | 107 | ||
108 | #define NR_AIC_IRQS 32 | 108 | #define NR_AIC_IRQS 32 |
109 | #define NR_IRQS (NR_AIC_IRQS + NR_BOARD_IRQS) | 109 | #define NR_IRQS (NR_AIC_IRQS + NR_BOARD_IRQS) |
110 | 110 | ||
111 | #elif !defined (CONFIG_403) | 111 | #elif !defined (CONFIG_403) |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * The PowerPC 405 cores' Universal Interrupt Controller (UIC) has 32 | 114 | * The PowerPC 405 cores' Universal Interrupt Controller (UIC) has 32 |
115 | * possible interrupts as well. There are seven configurable external | 115 | * possible interrupts as well. There are seven configurable external |
116 | * interrupt pins and there are 17 internal interrupts for the on-chip | 116 | * interrupt pins and there are 17 internal interrupts for the on-chip |
117 | * serial port, DMA controller, on-chip Ethernet controller, PCI, etc. | 117 | * serial port, DMA controller, on-chip Ethernet controller, PCI, etc. |
118 | * | 118 | * |
119 | */ | 119 | */ |
120 | 120 | ||
121 | 121 | ||
122 | #define NR_UIC_IRQS UIC_WIDTH | 122 | #define NR_UIC_IRQS UIC_WIDTH |
123 | #define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) | 123 | #define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) |
124 | #endif | 124 | #endif |
125 | 125 | ||
126 | #elif defined(CONFIG_44x) | 126 | #elif defined(CONFIG_44x) |
127 | #include <asm/ibm44x.h> | 127 | #include <asm/ibm44x.h> |
128 | 128 | ||
129 | #define NR_UIC_IRQS 32 | 129 | #define NR_UIC_IRQS 32 |
130 | #define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) | 130 | #define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS) |
131 | 131 | ||
132 | #elif defined(CONFIG_8xx) | 132 | #elif defined(CONFIG_8xx) |
133 | 133 | ||
134 | /* Now include the board configuration specific associations. | 134 | /* Now include the board configuration specific associations. |
135 | */ | 135 | */ |
136 | #include <asm/mpc8xx.h> | 136 | #include <asm/mpc8xx.h> |
137 | 137 | ||
138 | /* The MPC8xx cores have 16 possible interrupts. There are eight | 138 | /* The MPC8xx cores have 16 possible interrupts. There are eight |
139 | * possible level sensitive interrupts assigned and generated internally | 139 | * possible level sensitive interrupts assigned and generated internally |
140 | * from such devices as CPM, PCMCIA, RTC, PIT, TimeBase and Decrementer. | 140 | * from such devices as CPM, PCMCIA, RTC, PIT, TimeBase and Decrementer. |
141 | * There are eight external interrupts (IRQs) that can be configured | 141 | * There are eight external interrupts (IRQs) that can be configured |
142 | * as either level or edge sensitive. | 142 | * as either level or edge sensitive. |
143 | * | 143 | * |
144 | * On some implementations, there is also the possibility of an 8259 | 144 | * On some implementations, there is also the possibility of an 8259 |
145 | * through the PCI and PCI-ISA bridges. | 145 | * through the PCI and PCI-ISA bridges. |
146 | * | 146 | * |
147 | * We are "flattening" the interrupt vectors of the cascaded CPM | 147 | * We are "flattening" the interrupt vectors of the cascaded CPM |
148 | * and 8259 interrupt controllers so that we can uniquely identify | 148 | * and 8259 interrupt controllers so that we can uniquely identify |
149 | * any interrupt source with a single integer. | 149 | * any interrupt source with a single integer. |
150 | */ | 150 | */ |
151 | #define NR_SIU_INTS 16 | 151 | #define NR_SIU_INTS 16 |
152 | #define NR_CPM_INTS 32 | 152 | #define NR_CPM_INTS 32 |
153 | #ifndef NR_8259_INTS | 153 | #ifndef NR_8259_INTS |
154 | #define NR_8259_INTS 0 | 154 | #define NR_8259_INTS 0 |
155 | #endif | 155 | #endif |
156 | 156 | ||
157 | #define SIU_IRQ_OFFSET 0 | 157 | #define SIU_IRQ_OFFSET 0 |
158 | #define CPM_IRQ_OFFSET (SIU_IRQ_OFFSET + NR_SIU_INTS) | 158 | #define CPM_IRQ_OFFSET (SIU_IRQ_OFFSET + NR_SIU_INTS) |
159 | #define I8259_IRQ_OFFSET (CPM_IRQ_OFFSET + NR_CPM_INTS) | 159 | #define I8259_IRQ_OFFSET (CPM_IRQ_OFFSET + NR_CPM_INTS) |
160 | 160 | ||
161 | #define NR_IRQS (NR_SIU_INTS + NR_CPM_INTS + NR_8259_INTS) | 161 | #define NR_IRQS (NR_SIU_INTS + NR_CPM_INTS + NR_8259_INTS) |
162 | 162 | ||
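As a worked example of the flattening above: CPM vector 3 lands at global irq CPM_IRQ_OFFSET + 3 = 16 + 3 = 19, and (when NR_8259_INTS is non-zero) 8259 pin 0 lands at I8259_IRQ_OFFSET = 48.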
163 | /* These values must be zero-based and map 1:1 with the SIU configuration. | 163 | /* These values must be zero-based and map 1:1 with the SIU configuration. |
164 | * They are used throughout the 8xx I/O subsystem to generate | 164 | * They are used throughout the 8xx I/O subsystem to generate |
165 | * interrupt masks, flags, and other control patterns. This is why the | 165 | * interrupt masks, flags, and other control patterns. This is why the |
166 | * current kernel assumption of the 8259 as the base controller is such | 166 | * current kernel assumption of the 8259 as the base controller is such |
167 | * a pain in the butt. | 167 | * a pain in the butt. |
168 | */ | 168 | */ |
169 | #define SIU_IRQ0 (0) /* Highest priority */ | 169 | #define SIU_IRQ0 (0) /* Highest priority */ |
170 | #define SIU_LEVEL0 (1) | 170 | #define SIU_LEVEL0 (1) |
171 | #define SIU_IRQ1 (2) | 171 | #define SIU_IRQ1 (2) |
172 | #define SIU_LEVEL1 (3) | 172 | #define SIU_LEVEL1 (3) |
173 | #define SIU_IRQ2 (4) | 173 | #define SIU_IRQ2 (4) |
174 | #define SIU_LEVEL2 (5) | 174 | #define SIU_LEVEL2 (5) |
175 | #define SIU_IRQ3 (6) | 175 | #define SIU_IRQ3 (6) |
176 | #define SIU_LEVEL3 (7) | 176 | #define SIU_LEVEL3 (7) |
177 | #define SIU_IRQ4 (8) | 177 | #define SIU_IRQ4 (8) |
178 | #define SIU_LEVEL4 (9) | 178 | #define SIU_LEVEL4 (9) |
179 | #define SIU_IRQ5 (10) | 179 | #define SIU_IRQ5 (10) |
180 | #define SIU_LEVEL5 (11) | 180 | #define SIU_LEVEL5 (11) |
181 | #define SIU_IRQ6 (12) | 181 | #define SIU_IRQ6 (12) |
182 | #define SIU_LEVEL6 (13) | 182 | #define SIU_LEVEL6 (13) |
183 | #define SIU_IRQ7 (14) | 183 | #define SIU_IRQ7 (14) |
184 | #define SIU_LEVEL7 (15) | 184 | #define SIU_LEVEL7 (15) |
185 | 185 | ||
186 | #define MPC8xx_INT_FEC1 SIU_LEVEL1 | 186 | #define MPC8xx_INT_FEC1 SIU_LEVEL1 |
187 | #define MPC8xx_INT_FEC2 SIU_LEVEL3 | 187 | #define MPC8xx_INT_FEC2 SIU_LEVEL3 |
188 | 188 | ||
189 | #define MPC8xx_INT_SCC1 (CPM_IRQ_OFFSET + CPMVEC_SCC1) | 189 | #define MPC8xx_INT_SCC1 (CPM_IRQ_OFFSET + CPMVEC_SCC1) |
190 | #define MPC8xx_INT_SCC2 (CPM_IRQ_OFFSET + CPMVEC_SCC2) | 190 | #define MPC8xx_INT_SCC2 (CPM_IRQ_OFFSET + CPMVEC_SCC2) |
191 | #define MPC8xx_INT_SCC3 (CPM_IRQ_OFFSET + CPMVEC_SCC3) | 191 | #define MPC8xx_INT_SCC3 (CPM_IRQ_OFFSET + CPMVEC_SCC3) |
192 | #define MPC8xx_INT_SCC4 (CPM_IRQ_OFFSET + CPMVEC_SCC4) | 192 | #define MPC8xx_INT_SCC4 (CPM_IRQ_OFFSET + CPMVEC_SCC4) |
193 | #define MPC8xx_INT_SMC1 (CPM_IRQ_OFFSET + CPMVEC_SMC1) | 193 | #define MPC8xx_INT_SMC1 (CPM_IRQ_OFFSET + CPMVEC_SMC1) |
194 | #define MPC8xx_INT_SMC2 (CPM_IRQ_OFFSET + CPMVEC_SMC2) | 194 | #define MPC8xx_INT_SMC2 (CPM_IRQ_OFFSET + CPMVEC_SMC2) |
195 | 195 | ||
196 | /* The internal interrupts we can configure as we see fit. | 196 | /* The internal interrupts we can configure as we see fit. |
197 | * My personal preference is CPM at level 2, which puts it above the | 197 | * My personal preference is CPM at level 2, which puts it above the |
198 | * MBX PCI/ISA/IDE interrupts. | 198 | * MBX PCI/ISA/IDE interrupts. |
199 | */ | 199 | */ |
200 | #ifndef PIT_INTERRUPT | 200 | #ifndef PIT_INTERRUPT |
201 | #define PIT_INTERRUPT SIU_LEVEL0 | 201 | #define PIT_INTERRUPT SIU_LEVEL0 |
202 | #endif | 202 | #endif |
203 | #ifndef CPM_INTERRUPT | 203 | #ifndef CPM_INTERRUPT |
204 | #define CPM_INTERRUPT SIU_LEVEL2 | 204 | #define CPM_INTERRUPT SIU_LEVEL2 |
205 | #endif | 205 | #endif |
206 | #ifndef PCMCIA_INTERRUPT | 206 | #ifndef PCMCIA_INTERRUPT |
207 | #define PCMCIA_INTERRUPT SIU_LEVEL6 | 207 | #define PCMCIA_INTERRUPT SIU_LEVEL6 |
208 | #endif | 208 | #endif |
209 | #ifndef DEC_INTERRUPT | 209 | #ifndef DEC_INTERRUPT |
210 | #define DEC_INTERRUPT SIU_LEVEL7 | 210 | #define DEC_INTERRUPT SIU_LEVEL7 |
211 | #endif | 211 | #endif |
212 | 212 | ||
213 | /* Some internal interrupt registers use an 8-bit mask for the interrupt | 213 | /* Some internal interrupt registers use an 8-bit mask for the interrupt |
214 | * level instead of a number. | 214 | * level instead of a number. |
215 | */ | 215 | */ |
216 | #define mk_int_int_mask(IL) (1 << (7 - (IL/2))) | 216 | #define mk_int_int_mask(IL) (1 << (7 - (IL/2))) |
217 | 217 | ||
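For instance, mk_int_int_mask(SIU_LEVEL2) with SIU_LEVEL2 = 5 evaluates to 1 << (7 - 5/2) = 1 << 5 = 0x20, the bit the mask registers use for level 2.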
218 | #elif defined(CONFIG_83xx) | 218 | #elif defined(CONFIG_83xx) |
219 | #include <asm/mpc83xx.h> | 219 | #include <asm/mpc83xx.h> |
220 | 220 | ||
221 | #define NR_IRQS (NR_IPIC_INTS) | 221 | #define NR_IRQS (NR_IPIC_INTS) |
222 | 222 | ||
223 | #elif defined(CONFIG_85xx) | 223 | #elif defined(CONFIG_85xx) |
224 | /* Now include the board configuration specific associations. | 224 | /* Now include the board configuration specific associations. |
225 | */ | 225 | */ |
226 | #include <asm/mpc85xx.h> | 226 | #include <asm/mpc85xx.h> |
227 | 227 | ||
228 | /* The MPC8548 openpic has 48 internal interrupts and 12 external | 228 | /* The MPC8548 openpic has 48 internal interrupts and 12 external |
229 | * interrupts. | 229 | * interrupts. |
230 | * | 230 | * |
231 | * We are "flattening" the interrupt vectors of the cascaded CPM | 231 | * We are "flattening" the interrupt vectors of the cascaded CPM |
232 | * so that we can uniquely identify any interrupt source with a | 232 | * so that we can uniquely identify any interrupt source with a |
233 | * single integer. | 233 | * single integer. |
234 | */ | 234 | */ |
235 | #define NR_CPM_INTS 64 | 235 | #define NR_CPM_INTS 64 |
236 | #define NR_EPIC_INTS 60 | 236 | #define NR_EPIC_INTS 60 |
237 | #ifndef NR_8259_INTS | 237 | #ifndef NR_8259_INTS |
238 | #define NR_8259_INTS 0 | 238 | #define NR_8259_INTS 0 |
239 | #endif | 239 | #endif |
240 | #define NUM_8259_INTERRUPTS NR_8259_INTS | 240 | #define NUM_8259_INTERRUPTS NR_8259_INTS |
241 | 241 | ||
242 | #ifndef CPM_IRQ_OFFSET | 242 | #ifndef CPM_IRQ_OFFSET |
243 | #define CPM_IRQ_OFFSET 0 | 243 | #define CPM_IRQ_OFFSET 0 |
244 | #endif | 244 | #endif |
245 | 245 | ||
246 | #define NR_IRQS (NR_EPIC_INTS + NR_CPM_INTS + NR_8259_INTS) | 246 | #define NR_IRQS (NR_EPIC_INTS + NR_CPM_INTS + NR_8259_INTS) |
247 | 247 | ||
248 | /* Internal IRQs on MPC85xx OpenPIC */ | 248 | /* Internal IRQs on MPC85xx OpenPIC */ |
249 | 249 | ||
250 | #ifndef MPC85xx_OPENPIC_IRQ_OFFSET | 250 | #ifndef MPC85xx_OPENPIC_IRQ_OFFSET |
251 | #ifdef CONFIG_CPM2 | 251 | #ifdef CONFIG_CPM2 |
252 | #define MPC85xx_OPENPIC_IRQ_OFFSET (CPM_IRQ_OFFSET + NR_CPM_INTS) | 252 | #define MPC85xx_OPENPIC_IRQ_OFFSET (CPM_IRQ_OFFSET + NR_CPM_INTS) |
253 | #else | 253 | #else |
254 | #define MPC85xx_OPENPIC_IRQ_OFFSET 0 | 254 | #define MPC85xx_OPENPIC_IRQ_OFFSET 0 |
255 | #endif | 255 | #endif |
256 | #endif | 256 | #endif |
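The net effect, assuming CPM_IRQ_OFFSET keeps its default of 0, is that the flattened CPM vectors come first when a CPM2 is present and the OpenPIC ranges shift up behind them; a sketch of the resulting numbering:

	/* With CONFIG_CPM2:                Without CONFIG_CPM2:
	 *   CPM vectors        0 ..  63     OpenPIC internal   0 .. 47
	 *   OpenPIC internal  64 .. 111     OpenPIC external  48 .. 59
	 *   OpenPIC external 112 .. 123
	 */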
257 | 257 | ||
258 | /* Not all of these exist on all MPC85xx implementations */ | 258 | /* Not all of these exist on all MPC85xx implementations */ |
259 | #define MPC85xx_IRQ_L2CACHE ( 0 + MPC85xx_OPENPIC_IRQ_OFFSET) | 259 | #define MPC85xx_IRQ_L2CACHE ( 0 + MPC85xx_OPENPIC_IRQ_OFFSET) |
260 | #define MPC85xx_IRQ_ECM ( 1 + MPC85xx_OPENPIC_IRQ_OFFSET) | 260 | #define MPC85xx_IRQ_ECM ( 1 + MPC85xx_OPENPIC_IRQ_OFFSET) |
261 | #define MPC85xx_IRQ_DDR ( 2 + MPC85xx_OPENPIC_IRQ_OFFSET) | 261 | #define MPC85xx_IRQ_DDR ( 2 + MPC85xx_OPENPIC_IRQ_OFFSET) |
262 | #define MPC85xx_IRQ_LBIU ( 3 + MPC85xx_OPENPIC_IRQ_OFFSET) | 262 | #define MPC85xx_IRQ_LBIU ( 3 + MPC85xx_OPENPIC_IRQ_OFFSET) |
263 | #define MPC85xx_IRQ_DMA0 ( 4 + MPC85xx_OPENPIC_IRQ_OFFSET) | 263 | #define MPC85xx_IRQ_DMA0 ( 4 + MPC85xx_OPENPIC_IRQ_OFFSET) |
264 | #define MPC85xx_IRQ_DMA1 ( 5 + MPC85xx_OPENPIC_IRQ_OFFSET) | 264 | #define MPC85xx_IRQ_DMA1 ( 5 + MPC85xx_OPENPIC_IRQ_OFFSET) |
265 | #define MPC85xx_IRQ_DMA2 ( 6 + MPC85xx_OPENPIC_IRQ_OFFSET) | 265 | #define MPC85xx_IRQ_DMA2 ( 6 + MPC85xx_OPENPIC_IRQ_OFFSET) |
266 | #define MPC85xx_IRQ_DMA3 ( 7 + MPC85xx_OPENPIC_IRQ_OFFSET) | 266 | #define MPC85xx_IRQ_DMA3 ( 7 + MPC85xx_OPENPIC_IRQ_OFFSET) |
267 | #define MPC85xx_IRQ_PCI1 ( 8 + MPC85xx_OPENPIC_IRQ_OFFSET) | 267 | #define MPC85xx_IRQ_PCI1 ( 8 + MPC85xx_OPENPIC_IRQ_OFFSET) |
268 | #define MPC85xx_IRQ_PCI2 ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET) | 268 | #define MPC85xx_IRQ_PCI2 ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET) |
269 | #define MPC85xx_IRQ_RIO_ERROR ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET) | 269 | #define MPC85xx_IRQ_RIO_ERROR ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET) |
270 | #define MPC85xx_IRQ_RIO_BELL (10 + MPC85xx_OPENPIC_IRQ_OFFSET) | 270 | #define MPC85xx_IRQ_RIO_BELL (10 + MPC85xx_OPENPIC_IRQ_OFFSET) |
271 | #define MPC85xx_IRQ_RIO_TX (11 + MPC85xx_OPENPIC_IRQ_OFFSET) | 271 | #define MPC85xx_IRQ_RIO_TX (11 + MPC85xx_OPENPIC_IRQ_OFFSET) |
272 | #define MPC85xx_IRQ_RIO_RX (12 + MPC85xx_OPENPIC_IRQ_OFFSET) | 272 | #define MPC85xx_IRQ_RIO_RX (12 + MPC85xx_OPENPIC_IRQ_OFFSET) |
273 | #define MPC85xx_IRQ_TSEC1_TX (13 + MPC85xx_OPENPIC_IRQ_OFFSET) | 273 | #define MPC85xx_IRQ_TSEC1_TX (13 + MPC85xx_OPENPIC_IRQ_OFFSET) |
274 | #define MPC85xx_IRQ_TSEC1_RX (14 + MPC85xx_OPENPIC_IRQ_OFFSET) | 274 | #define MPC85xx_IRQ_TSEC1_RX (14 + MPC85xx_OPENPIC_IRQ_OFFSET) |
275 | #define MPC85xx_IRQ_TSEC3_TX (15 + MPC85xx_OPENPIC_IRQ_OFFSET) | 275 | #define MPC85xx_IRQ_TSEC3_TX (15 + MPC85xx_OPENPIC_IRQ_OFFSET) |
276 | #define MPC85xx_IRQ_TSEC3_RX (16 + MPC85xx_OPENPIC_IRQ_OFFSET) | 276 | #define MPC85xx_IRQ_TSEC3_RX (16 + MPC85xx_OPENPIC_IRQ_OFFSET) |
277 | #define MPC85xx_IRQ_TSEC3_ERROR (17 + MPC85xx_OPENPIC_IRQ_OFFSET) | 277 | #define MPC85xx_IRQ_TSEC3_ERROR (17 + MPC85xx_OPENPIC_IRQ_OFFSET) |
278 | #define MPC85xx_IRQ_TSEC1_ERROR (18 + MPC85xx_OPENPIC_IRQ_OFFSET) | 278 | #define MPC85xx_IRQ_TSEC1_ERROR (18 + MPC85xx_OPENPIC_IRQ_OFFSET) |
279 | #define MPC85xx_IRQ_TSEC2_TX (19 + MPC85xx_OPENPIC_IRQ_OFFSET) | 279 | #define MPC85xx_IRQ_TSEC2_TX (19 + MPC85xx_OPENPIC_IRQ_OFFSET) |
280 | #define MPC85xx_IRQ_TSEC2_RX (20 + MPC85xx_OPENPIC_IRQ_OFFSET) | 280 | #define MPC85xx_IRQ_TSEC2_RX (20 + MPC85xx_OPENPIC_IRQ_OFFSET) |
281 | #define MPC85xx_IRQ_TSEC4_TX (21 + MPC85xx_OPENPIC_IRQ_OFFSET) | 281 | #define MPC85xx_IRQ_TSEC4_TX (21 + MPC85xx_OPENPIC_IRQ_OFFSET) |
282 | #define MPC85xx_IRQ_TSEC4_RX (22 + MPC85xx_OPENPIC_IRQ_OFFSET) | 282 | #define MPC85xx_IRQ_TSEC4_RX (22 + MPC85xx_OPENPIC_IRQ_OFFSET) |
283 | #define MPC85xx_IRQ_TSEC4_ERROR (23 + MPC85xx_OPENPIC_IRQ_OFFSET) | 283 | #define MPC85xx_IRQ_TSEC4_ERROR (23 + MPC85xx_OPENPIC_IRQ_OFFSET) |
284 | #define MPC85xx_IRQ_TSEC2_ERROR (24 + MPC85xx_OPENPIC_IRQ_OFFSET) | 284 | #define MPC85xx_IRQ_TSEC2_ERROR (24 + MPC85xx_OPENPIC_IRQ_OFFSET) |
285 | #define MPC85xx_IRQ_FEC (25 + MPC85xx_OPENPIC_IRQ_OFFSET) | 285 | #define MPC85xx_IRQ_FEC (25 + MPC85xx_OPENPIC_IRQ_OFFSET) |
286 | #define MPC85xx_IRQ_DUART (26 + MPC85xx_OPENPIC_IRQ_OFFSET) | 286 | #define MPC85xx_IRQ_DUART (26 + MPC85xx_OPENPIC_IRQ_OFFSET) |
287 | #define MPC85xx_IRQ_IIC1 (27 + MPC85xx_OPENPIC_IRQ_OFFSET) | 287 | #define MPC85xx_IRQ_IIC1 (27 + MPC85xx_OPENPIC_IRQ_OFFSET) |
288 | #define MPC85xx_IRQ_PERFMON (28 + MPC85xx_OPENPIC_IRQ_OFFSET) | 288 | #define MPC85xx_IRQ_PERFMON (28 + MPC85xx_OPENPIC_IRQ_OFFSET) |
289 | #define MPC85xx_IRQ_SEC2 (29 + MPC85xx_OPENPIC_IRQ_OFFSET) | 289 | #define MPC85xx_IRQ_SEC2 (29 + MPC85xx_OPENPIC_IRQ_OFFSET) |
290 | #define MPC85xx_IRQ_CPM (30 + MPC85xx_OPENPIC_IRQ_OFFSET) | 290 | #define MPC85xx_IRQ_CPM (30 + MPC85xx_OPENPIC_IRQ_OFFSET) |
291 | 291 | ||
292 | /* The 12 external interrupt lines */ | 292 | /* The 12 external interrupt lines */ |
293 | #define MPC85xx_IRQ_EXT0 (48 + MPC85xx_OPENPIC_IRQ_OFFSET) | 293 | #define MPC85xx_IRQ_EXT0 (48 + MPC85xx_OPENPIC_IRQ_OFFSET) |
294 | #define MPC85xx_IRQ_EXT1 (49 + MPC85xx_OPENPIC_IRQ_OFFSET) | 294 | #define MPC85xx_IRQ_EXT1 (49 + MPC85xx_OPENPIC_IRQ_OFFSET) |
295 | #define MPC85xx_IRQ_EXT2 (50 + MPC85xx_OPENPIC_IRQ_OFFSET) | 295 | #define MPC85xx_IRQ_EXT2 (50 + MPC85xx_OPENPIC_IRQ_OFFSET) |
296 | #define MPC85xx_IRQ_EXT3 (51 + MPC85xx_OPENPIC_IRQ_OFFSET) | 296 | #define MPC85xx_IRQ_EXT3 (51 + MPC85xx_OPENPIC_IRQ_OFFSET) |
297 | #define MPC85xx_IRQ_EXT4 (52 + MPC85xx_OPENPIC_IRQ_OFFSET) | 297 | #define MPC85xx_IRQ_EXT4 (52 + MPC85xx_OPENPIC_IRQ_OFFSET) |
298 | #define MPC85xx_IRQ_EXT5 (53 + MPC85xx_OPENPIC_IRQ_OFFSET) | 298 | #define MPC85xx_IRQ_EXT5 (53 + MPC85xx_OPENPIC_IRQ_OFFSET) |
299 | #define MPC85xx_IRQ_EXT6 (54 + MPC85xx_OPENPIC_IRQ_OFFSET) | 299 | #define MPC85xx_IRQ_EXT6 (54 + MPC85xx_OPENPIC_IRQ_OFFSET) |
300 | #define MPC85xx_IRQ_EXT7 (55 + MPC85xx_OPENPIC_IRQ_OFFSET) | 300 | #define MPC85xx_IRQ_EXT7 (55 + MPC85xx_OPENPIC_IRQ_OFFSET) |
301 | #define MPC85xx_IRQ_EXT8 (56 + MPC85xx_OPENPIC_IRQ_OFFSET) | 301 | #define MPC85xx_IRQ_EXT8 (56 + MPC85xx_OPENPIC_IRQ_OFFSET) |
302 | #define MPC85xx_IRQ_EXT9 (57 + MPC85xx_OPENPIC_IRQ_OFFSET) | 302 | #define MPC85xx_IRQ_EXT9 (57 + MPC85xx_OPENPIC_IRQ_OFFSET) |
303 | #define MPC85xx_IRQ_EXT10 (58 + MPC85xx_OPENPIC_IRQ_OFFSET) | 303 | #define MPC85xx_IRQ_EXT10 (58 + MPC85xx_OPENPIC_IRQ_OFFSET) |
304 | #define MPC85xx_IRQ_EXT11 (59 + MPC85xx_OPENPIC_IRQ_OFFSET) | 304 | #define MPC85xx_IRQ_EXT11 (59 + MPC85xx_OPENPIC_IRQ_OFFSET) |
305 | 305 | ||
306 | /* CPM related interrupts */ | 306 | /* CPM related interrupts */ |
307 | #define SIU_INT_ERROR ((uint)0x00+CPM_IRQ_OFFSET) | 307 | #define SIU_INT_ERROR ((uint)0x00+CPM_IRQ_OFFSET) |
308 | #define SIU_INT_I2C ((uint)0x01+CPM_IRQ_OFFSET) | 308 | #define SIU_INT_I2C ((uint)0x01+CPM_IRQ_OFFSET) |
309 | #define SIU_INT_SPI ((uint)0x02+CPM_IRQ_OFFSET) | 309 | #define SIU_INT_SPI ((uint)0x02+CPM_IRQ_OFFSET) |
310 | #define SIU_INT_RISC ((uint)0x03+CPM_IRQ_OFFSET) | 310 | #define SIU_INT_RISC ((uint)0x03+CPM_IRQ_OFFSET) |
311 | #define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET) | 311 | #define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET) |
312 | #define SIU_INT_SMC2 ((uint)0x05+CPM_IRQ_OFFSET) | 312 | #define SIU_INT_SMC2 ((uint)0x05+CPM_IRQ_OFFSET) |
313 | #define SIU_INT_USB ((uint)0x0b+CPM_IRQ_OFFSET) | 313 | #define SIU_INT_USB ((uint)0x0b+CPM_IRQ_OFFSET) |
314 | #define SIU_INT_TIMER1 ((uint)0x0c+CPM_IRQ_OFFSET) | 314 | #define SIU_INT_TIMER1 ((uint)0x0c+CPM_IRQ_OFFSET) |
315 | #define SIU_INT_TIMER2 ((uint)0x0d+CPM_IRQ_OFFSET) | 315 | #define SIU_INT_TIMER2 ((uint)0x0d+CPM_IRQ_OFFSET) |
316 | #define SIU_INT_TIMER3 ((uint)0x0e+CPM_IRQ_OFFSET) | 316 | #define SIU_INT_TIMER3 ((uint)0x0e+CPM_IRQ_OFFSET) |
317 | #define SIU_INT_TIMER4 ((uint)0x0f+CPM_IRQ_OFFSET) | 317 | #define SIU_INT_TIMER4 ((uint)0x0f+CPM_IRQ_OFFSET) |
318 | #define SIU_INT_FCC1 ((uint)0x20+CPM_IRQ_OFFSET) | 318 | #define SIU_INT_FCC1 ((uint)0x20+CPM_IRQ_OFFSET) |
319 | #define SIU_INT_FCC2 ((uint)0x21+CPM_IRQ_OFFSET) | 319 | #define SIU_INT_FCC2 ((uint)0x21+CPM_IRQ_OFFSET) |
320 | #define SIU_INT_FCC3 ((uint)0x22+CPM_IRQ_OFFSET) | 320 | #define SIU_INT_FCC3 ((uint)0x22+CPM_IRQ_OFFSET) |
321 | #define SIU_INT_MCC1 ((uint)0x24+CPM_IRQ_OFFSET) | 321 | #define SIU_INT_MCC1 ((uint)0x24+CPM_IRQ_OFFSET) |
322 | #define SIU_INT_MCC2 ((uint)0x25+CPM_IRQ_OFFSET) | 322 | #define SIU_INT_MCC2 ((uint)0x25+CPM_IRQ_OFFSET) |
323 | #define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET) | 323 | #define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET) |
324 | #define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET) | 324 | #define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET) |
325 | #define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET) | 325 | #define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET) |
326 | #define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET) | 326 | #define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET) |
327 | #define SIU_INT_PC15 ((uint)0x30+CPM_IRQ_OFFSET) | 327 | #define SIU_INT_PC15 ((uint)0x30+CPM_IRQ_OFFSET) |
328 | #define SIU_INT_PC14 ((uint)0x31+CPM_IRQ_OFFSET) | 328 | #define SIU_INT_PC14 ((uint)0x31+CPM_IRQ_OFFSET) |
329 | #define SIU_INT_PC13 ((uint)0x32+CPM_IRQ_OFFSET) | 329 | #define SIU_INT_PC13 ((uint)0x32+CPM_IRQ_OFFSET) |
330 | #define SIU_INT_PC12 ((uint)0x33+CPM_IRQ_OFFSET) | 330 | #define SIU_INT_PC12 ((uint)0x33+CPM_IRQ_OFFSET) |
331 | #define SIU_INT_PC11 ((uint)0x34+CPM_IRQ_OFFSET) | 331 | #define SIU_INT_PC11 ((uint)0x34+CPM_IRQ_OFFSET) |
332 | #define SIU_INT_PC10 ((uint)0x35+CPM_IRQ_OFFSET) | 332 | #define SIU_INT_PC10 ((uint)0x35+CPM_IRQ_OFFSET) |
333 | #define SIU_INT_PC9 ((uint)0x36+CPM_IRQ_OFFSET) | 333 | #define SIU_INT_PC9 ((uint)0x36+CPM_IRQ_OFFSET) |
334 | #define SIU_INT_PC8 ((uint)0x37+CPM_IRQ_OFFSET) | 334 | #define SIU_INT_PC8 ((uint)0x37+CPM_IRQ_OFFSET) |
335 | #define SIU_INT_PC7 ((uint)0x38+CPM_IRQ_OFFSET) | 335 | #define SIU_INT_PC7 ((uint)0x38+CPM_IRQ_OFFSET) |
336 | #define SIU_INT_PC6 ((uint)0x39+CPM_IRQ_OFFSET) | 336 | #define SIU_INT_PC6 ((uint)0x39+CPM_IRQ_OFFSET) |
337 | #define SIU_INT_PC5 ((uint)0x3a+CPM_IRQ_OFFSET) | 337 | #define SIU_INT_PC5 ((uint)0x3a+CPM_IRQ_OFFSET) |
338 | #define SIU_INT_PC4 ((uint)0x3b+CPM_IRQ_OFFSET) | 338 | #define SIU_INT_PC4 ((uint)0x3b+CPM_IRQ_OFFSET) |
339 | #define SIU_INT_PC3 ((uint)0x3c+CPM_IRQ_OFFSET) | 339 | #define SIU_INT_PC3 ((uint)0x3c+CPM_IRQ_OFFSET) |
340 | #define SIU_INT_PC2 ((uint)0x3d+CPM_IRQ_OFFSET) | 340 | #define SIU_INT_PC2 ((uint)0x3d+CPM_IRQ_OFFSET) |
341 | #define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET) | 341 | #define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET) |
342 | #define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET) | 342 | #define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET) |
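Because the CPM sources are flattened into the same integer space, a driver can hand one of these macros straight to request_irq(). A minimal sketch against the handler signature of this kernel generation; the handler body and names are illustrative, not from the source:

	#include <linux/interrupt.h>

	static irqreturn_t demo_smc1_handler(int irq, void *dev_id,
					     struct pt_regs *regs)
	{
		/* Service the SMC1 event; dev_id carries the driver context. */
		return IRQ_HANDLED;
	}

	static int demo_attach_smc1(void)
	{
		/* SIU_INT_SMC1 already folds in CPM_IRQ_OFFSET. */
		return request_irq(SIU_INT_SMC1, demo_smc1_handler, 0,
				   "demo-smc1", NULL);
	}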
343 | 343 | ||
344 | #else /* not CONFIG_40x, 8xx, 83xx or 85xx */ | 344 | #else /* not CONFIG_40x, 8xx, 83xx or 85xx */ |
345 | /* | 345 | /* |
346 | * this is the number of IRQs for all ppc platforms (pmac/chrp/prep), | 346 | * this is the number of IRQs for all ppc platforms (pmac/chrp/prep), |
347 | * so it is the maximum of them all | 347 | * so it is the maximum of them all |
348 | */ | 348 | */ |
349 | #define NR_IRQS 256 | 349 | #define NR_IRQS 256 |
350 | #define __DO_IRQ_CANON 1 | 350 | #define __DO_IRQ_CANON 1 |
351 | 351 | ||
352 | #ifndef CONFIG_8260 | 352 | #ifndef CONFIG_8260 |
353 | 353 | ||
354 | #define NUM_8259_INTERRUPTS 16 | 354 | #define NUM_8259_INTERRUPTS 16 |
355 | 355 | ||
356 | #else /* CONFIG_8260 */ | 356 | #else /* CONFIG_8260 */ |
357 | 357 | ||
358 | /* The 8260 has an internal interrupt controller with a maximum of | 358 | /* The 8260 has an internal interrupt controller with a maximum of |
359 | * 64 IRQs. We will use NR_IRQS from above since it is large enough. | 359 | * 64 IRQs. We will use NR_IRQS from above since it is large enough. |
360 | * Don't be confused by the 8260 documentation where they list an | 360 | * Don't be confused by the 8260 documentation where they list an |
361 | * "interrupt number" and "interrupt vector". We are only interested | 361 | * "interrupt number" and "interrupt vector". We are only interested |
362 | * in the interrupt vector. There are "reserved" holes where the | 362 | * in the interrupt vector. There are "reserved" holes where the |
363 | * vector number increases, but the interrupt number in the table does not. | 363 | * vector number increases, but the interrupt number in the table does not. |
364 | * (Documentation errata updates have fixed this; make sure you have | 364 | * (Documentation errata updates have fixed this; make sure you have |
365 | * up-to-date processor documentation -- Dan). | 365 | * up-to-date processor documentation -- Dan). |
366 | */ | 366 | */ |
367 | 367 | ||
368 | #ifndef CPM_IRQ_OFFSET | 368 | #ifndef CPM_IRQ_OFFSET |
369 | #define CPM_IRQ_OFFSET 0 | 369 | #define CPM_IRQ_OFFSET 0 |
370 | #endif | 370 | #endif |
371 | 371 | ||
372 | #define NR_CPM_INTS 64 | 372 | #define NR_CPM_INTS 64 |
373 | 373 | ||
374 | #define SIU_INT_ERROR ((uint)0x00 + CPM_IRQ_OFFSET) | 374 | #define SIU_INT_ERROR ((uint)0x00 + CPM_IRQ_OFFSET) |
375 | #define SIU_INT_I2C ((uint)0x01 + CPM_IRQ_OFFSET) | 375 | #define SIU_INT_I2C ((uint)0x01 + CPM_IRQ_OFFSET) |
376 | #define SIU_INT_SPI ((uint)0x02 + CPM_IRQ_OFFSET) | 376 | #define SIU_INT_SPI ((uint)0x02 + CPM_IRQ_OFFSET) |
377 | #define SIU_INT_RISC ((uint)0x03 + CPM_IRQ_OFFSET) | 377 | #define SIU_INT_RISC ((uint)0x03 + CPM_IRQ_OFFSET) |
378 | #define SIU_INT_SMC1 ((uint)0x04 + CPM_IRQ_OFFSET) | 378 | #define SIU_INT_SMC1 ((uint)0x04 + CPM_IRQ_OFFSET) |
379 | #define SIU_INT_SMC2 ((uint)0x05 + CPM_IRQ_OFFSET) | 379 | #define SIU_INT_SMC2 ((uint)0x05 + CPM_IRQ_OFFSET) |
380 | #define SIU_INT_IDMA1 ((uint)0x06 + CPM_IRQ_OFFSET) | 380 | #define SIU_INT_IDMA1 ((uint)0x06 + CPM_IRQ_OFFSET) |
381 | #define SIU_INT_IDMA2 ((uint)0x07 + CPM_IRQ_OFFSET) | 381 | #define SIU_INT_IDMA2 ((uint)0x07 + CPM_IRQ_OFFSET) |
382 | #define SIU_INT_IDMA3 ((uint)0x08 + CPM_IRQ_OFFSET) | 382 | #define SIU_INT_IDMA3 ((uint)0x08 + CPM_IRQ_OFFSET) |
383 | #define SIU_INT_IDMA4 ((uint)0x09 + CPM_IRQ_OFFSET) | 383 | #define SIU_INT_IDMA4 ((uint)0x09 + CPM_IRQ_OFFSET) |
384 | #define SIU_INT_SDMA ((uint)0x0a + CPM_IRQ_OFFSET) | 384 | #define SIU_INT_SDMA ((uint)0x0a + CPM_IRQ_OFFSET) |
385 | #define SIU_INT_USB ((uint)0x0b + CPM_IRQ_OFFSET) | 385 | #define SIU_INT_USB ((uint)0x0b + CPM_IRQ_OFFSET) |
386 | #define SIU_INT_TIMER1 ((uint)0x0c + CPM_IRQ_OFFSET) | 386 | #define SIU_INT_TIMER1 ((uint)0x0c + CPM_IRQ_OFFSET) |
387 | #define SIU_INT_TIMER2 ((uint)0x0d + CPM_IRQ_OFFSET) | 387 | #define SIU_INT_TIMER2 ((uint)0x0d + CPM_IRQ_OFFSET) |
388 | #define SIU_INT_TIMER3 ((uint)0x0e + CPM_IRQ_OFFSET) | 388 | #define SIU_INT_TIMER3 ((uint)0x0e + CPM_IRQ_OFFSET) |
389 | #define SIU_INT_TIMER4 ((uint)0x0f + CPM_IRQ_OFFSET) | 389 | #define SIU_INT_TIMER4 ((uint)0x0f + CPM_IRQ_OFFSET) |
390 | #define SIU_INT_TMCNT ((uint)0x10 + CPM_IRQ_OFFSET) | 390 | #define SIU_INT_TMCNT ((uint)0x10 + CPM_IRQ_OFFSET) |
391 | #define SIU_INT_PIT ((uint)0x11 + CPM_IRQ_OFFSET) | 391 | #define SIU_INT_PIT ((uint)0x11 + CPM_IRQ_OFFSET) |
392 | #define SIU_INT_IRQ1 ((uint)0x13 + CPM_IRQ_OFFSET) | 392 | #define SIU_INT_IRQ1 ((uint)0x13 + CPM_IRQ_OFFSET) |
393 | #define SIU_INT_IRQ2 ((uint)0x14 + CPM_IRQ_OFFSET) | 393 | #define SIU_INT_IRQ2 ((uint)0x14 + CPM_IRQ_OFFSET) |
394 | #define SIU_INT_IRQ3 ((uint)0x15 + CPM_IRQ_OFFSET) | 394 | #define SIU_INT_IRQ3 ((uint)0x15 + CPM_IRQ_OFFSET) |
395 | #define SIU_INT_IRQ4 ((uint)0x16 + CPM_IRQ_OFFSET) | 395 | #define SIU_INT_IRQ4 ((uint)0x16 + CPM_IRQ_OFFSET) |
396 | #define SIU_INT_IRQ5 ((uint)0x17 + CPM_IRQ_OFFSET) | 396 | #define SIU_INT_IRQ5 ((uint)0x17 + CPM_IRQ_OFFSET) |
397 | #define SIU_INT_IRQ6 ((uint)0x18 + CPM_IRQ_OFFSET) | 397 | #define SIU_INT_IRQ6 ((uint)0x18 + CPM_IRQ_OFFSET) |
398 | #define SIU_INT_IRQ7 ((uint)0x19 + CPM_IRQ_OFFSET) | 398 | #define SIU_INT_IRQ7 ((uint)0x19 + CPM_IRQ_OFFSET) |
399 | #define SIU_INT_FCC1 ((uint)0x20 + CPM_IRQ_OFFSET) | 399 | #define SIU_INT_FCC1 ((uint)0x20 + CPM_IRQ_OFFSET) |
400 | #define SIU_INT_FCC2 ((uint)0x21 + CPM_IRQ_OFFSET) | 400 | #define SIU_INT_FCC2 ((uint)0x21 + CPM_IRQ_OFFSET) |
401 | #define SIU_INT_FCC3 ((uint)0x22 + CPM_IRQ_OFFSET) | 401 | #define SIU_INT_FCC3 ((uint)0x22 + CPM_IRQ_OFFSET) |
402 | #define SIU_INT_MCC1 ((uint)0x24 + CPM_IRQ_OFFSET) | 402 | #define SIU_INT_MCC1 ((uint)0x24 + CPM_IRQ_OFFSET) |
403 | #define SIU_INT_MCC2 ((uint)0x25 + CPM_IRQ_OFFSET) | 403 | #define SIU_INT_MCC2 ((uint)0x25 + CPM_IRQ_OFFSET) |
404 | #define SIU_INT_SCC1 ((uint)0x28 + CPM_IRQ_OFFSET) | 404 | #define SIU_INT_SCC1 ((uint)0x28 + CPM_IRQ_OFFSET) |
405 | #define SIU_INT_SCC2 ((uint)0x29 + CPM_IRQ_OFFSET) | 405 | #define SIU_INT_SCC2 ((uint)0x29 + CPM_IRQ_OFFSET) |
406 | #define SIU_INT_SCC3 ((uint)0x2a + CPM_IRQ_OFFSET) | 406 | #define SIU_INT_SCC3 ((uint)0x2a + CPM_IRQ_OFFSET) |
407 | #define SIU_INT_SCC4 ((uint)0x2b + CPM_IRQ_OFFSET) | 407 | #define SIU_INT_SCC4 ((uint)0x2b + CPM_IRQ_OFFSET) |
408 | #define SIU_INT_PC15 ((uint)0x30 + CPM_IRQ_OFFSET) | 408 | #define SIU_INT_PC15 ((uint)0x30 + CPM_IRQ_OFFSET) |
409 | #define SIU_INT_PC14 ((uint)0x31 + CPM_IRQ_OFFSET) | 409 | #define SIU_INT_PC14 ((uint)0x31 + CPM_IRQ_OFFSET) |
410 | #define SIU_INT_PC13 ((uint)0x32 + CPM_IRQ_OFFSET) | 410 | #define SIU_INT_PC13 ((uint)0x32 + CPM_IRQ_OFFSET) |
411 | #define SIU_INT_PC12 ((uint)0x33 + CPM_IRQ_OFFSET) | 411 | #define SIU_INT_PC12 ((uint)0x33 + CPM_IRQ_OFFSET) |
412 | #define SIU_INT_PC11 ((uint)0x34 + CPM_IRQ_OFFSET) | 412 | #define SIU_INT_PC11 ((uint)0x34 + CPM_IRQ_OFFSET) |
413 | #define SIU_INT_PC10 ((uint)0x35 + CPM_IRQ_OFFSET) | 413 | #define SIU_INT_PC10 ((uint)0x35 + CPM_IRQ_OFFSET) |
414 | #define SIU_INT_PC9 ((uint)0x36 + CPM_IRQ_OFFSET) | 414 | #define SIU_INT_PC9 ((uint)0x36 + CPM_IRQ_OFFSET) |
415 | #define SIU_INT_PC8 ((uint)0x37 + CPM_IRQ_OFFSET) | 415 | #define SIU_INT_PC8 ((uint)0x37 + CPM_IRQ_OFFSET) |
416 | #define SIU_INT_PC7 ((uint)0x38 + CPM_IRQ_OFFSET) | 416 | #define SIU_INT_PC7 ((uint)0x38 + CPM_IRQ_OFFSET) |
417 | #define SIU_INT_PC6 ((uint)0x39 + CPM_IRQ_OFFSET) | 417 | #define SIU_INT_PC6 ((uint)0x39 + CPM_IRQ_OFFSET) |
418 | #define SIU_INT_PC5 ((uint)0x3a + CPM_IRQ_OFFSET) | 418 | #define SIU_INT_PC5 ((uint)0x3a + CPM_IRQ_OFFSET) |
419 | #define SIU_INT_PC4 ((uint)0x3b + CPM_IRQ_OFFSET) | 419 | #define SIU_INT_PC4 ((uint)0x3b + CPM_IRQ_OFFSET) |
420 | #define SIU_INT_PC3 ((uint)0x3c + CPM_IRQ_OFFSET) | 420 | #define SIU_INT_PC3 ((uint)0x3c + CPM_IRQ_OFFSET) |
421 | #define SIU_INT_PC2 ((uint)0x3d + CPM_IRQ_OFFSET) | 421 | #define SIU_INT_PC2 ((uint)0x3d + CPM_IRQ_OFFSET) |
422 | #define SIU_INT_PC1 ((uint)0x3e + CPM_IRQ_OFFSET) | 422 | #define SIU_INT_PC1 ((uint)0x3e + CPM_IRQ_OFFSET) |
423 | #define SIU_INT_PC0 ((uint)0x3f + CPM_IRQ_OFFSET) | 423 | #define SIU_INT_PC0 ((uint)0x3f + CPM_IRQ_OFFSET) |
424 | 424 | ||
425 | #endif /* CONFIG_8260 */ | 425 | #endif /* CONFIG_8260 */ |
426 | 426 | ||
427 | #endif | 427 | #endif |
428 | 428 | ||
429 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) | 429 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) |
430 | /* pedantic: these are long because they are used with set_bit --RR */ | 430 | /* pedantic: these are long because they are used with set_bit --RR */ |
431 | extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; | 431 | extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; |
432 | extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; | 432 | extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; |
433 | extern atomic_t ppc_n_lost_interrupts; | 433 | extern atomic_t ppc_n_lost_interrupts; |
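As the comment notes, these are unsigned long purely so the atomic bitops apply; a hedged sketch of the caching pattern a PIC driver might follow (the function is hypothetical, and whether masking sets or clears a bit depends on the controller's polarity):

	static void demo_cache_mask(unsigned int irq_nr)
	{
		/* set_bit()/clear_bit() operate on unsigned long words. */
		clear_bit(irq_nr, ppc_cached_irq_mask);
		/* ...then flush ppc_cached_irq_mask[irq_nr >> 5] to the
		 * controller's mask register (hardware-specific, omitted). */
	}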
434 | 434 | ||
435 | #define virt_irq_create_mapping(x) (x) | 435 | #define virt_irq_create_mapping(x) (x) |
436 | 436 | ||
437 | #endif | 437 | #endif |
438 | 438 | ||
439 | /* | 439 | /* |
440 | * Because many systems have two overlapping name spaces for | 440 | * Because many systems have two overlapping name spaces for |
441 | * interrupts (ISA and XICS for example), and the ISA interrupts | 441 | * interrupts (ISA and XICS for example), and the ISA interrupts |
442 | * have historically not been easy to renumber, we allow ISA | 442 | * have historically not been easy to renumber, we allow ISA |
443 | * interrupts to take values 0 - 15, and shift up the remaining | 443 | * interrupts to take values 0 - 15, and shift up the remaining |
444 | * interrupts by 0x10. | 444 | * interrupts by 0x10. |
445 | */ | 445 | */ |
446 | #define NUM_ISA_INTERRUPTS 0x10 | 446 | #define NUM_ISA_INTERRUPTS 0x10 |
447 | extern int __irq_offset_value; | 447 | extern int __irq_offset_value; |
448 | 448 | ||
449 | static inline int irq_offset_up(int irq) | 449 | static inline int irq_offset_up(int irq) |
450 | { | 450 | { |
451 | return irq + __irq_offset_value; | 451 | return irq + __irq_offset_value; |
452 | } | 452 | } |
453 | 453 | ||
454 | static inline int irq_offset_down(int irq) | 454 | static inline int irq_offset_down(int irq) |
455 | { | 455 | { |
456 | return irq - __irq_offset_value; | 456 | return irq - __irq_offset_value; |
457 | } | 457 | } |
458 | 458 | ||
459 | static inline int irq_offset_value(void) | 459 | static inline int irq_offset_value(void) |
460 | { | 460 | { |
461 | return __irq_offset_value; | 461 | return __irq_offset_value; |
462 | } | 462 | } |
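A worked example of the shift: on platforms with ISA interrupts the setup code points __irq_offset_value at NUM_ISA_INTERRUPTS, so ISA lines keep 0-15 while everything else moves up by 0x10:

	/* With __irq_offset_value == NUM_ISA_INTERRUPTS (0x10): */
	int linux_irq = irq_offset_up(5);	/* e.g. XICS source 5 -> Linux irq 21 */
	int hw_irq    = irq_offset_down(21);	/* and back again -> 5 */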
463 | 463 | ||
464 | #ifdef __DO_IRQ_CANON | 464 | #ifdef __DO_IRQ_CANON |
465 | extern int ppc_do_canonicalize_irqs; | 465 | extern int ppc_do_canonicalize_irqs; |
466 | #else | 466 | #else |
467 | #define ppc_do_canonicalize_irqs 0 | 467 | #define ppc_do_canonicalize_irqs 0 |
468 | #endif | 468 | #endif |
469 | 469 | ||
470 | static __inline__ int irq_canonicalize(int irq) | 470 | static __inline__ int irq_canonicalize(int irq) |
471 | { | 471 | { |
472 | if (ppc_do_canonicalize_irqs && irq == 2) | 472 | if (ppc_do_canonicalize_irqs && irq == 2) |
473 | irq = 9; | 473 | irq = 9; |
474 | return irq; | 474 | return irq; |
475 | } | 475 | } |
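The 2 -> 9 remapping is the classic PC cascade convention: with two cascaded 8259s the slave reports through line 2, which is historically renumbered to 9. Usage is simply:

	int irq = irq_canonicalize(2);	/* 9 where canonicalization applies, else 2 */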
476 | 476 | ||
477 | extern int distribute_irqs; | 477 | extern int distribute_irqs; |
478 | 478 | ||
479 | struct irqaction; | 479 | struct irqaction; |
480 | struct pt_regs; | 480 | struct pt_regs; |
481 | 481 | ||
482 | #ifdef CONFIG_IRQSTACKS | 482 | #ifdef CONFIG_IRQSTACKS |
483 | /* | 483 | /* |
484 | * Per-cpu stacks for handling hard and soft interrupts. | 484 | * Per-cpu stacks for handling hard and soft interrupts. |
485 | */ | 485 | */ |
486 | extern struct thread_info *hardirq_ctx[NR_CPUS]; | 486 | extern struct thread_info *hardirq_ctx[NR_CPUS]; |
487 | extern struct thread_info *softirq_ctx[NR_CPUS]; | 487 | extern struct thread_info *softirq_ctx[NR_CPUS]; |
488 | 488 | ||
489 | extern void irq_ctx_init(void); | 489 | extern void irq_ctx_init(void); |
490 | extern void call_do_softirq(struct thread_info *tp); | 490 | extern void call_do_softirq(struct thread_info *tp); |
491 | extern int call_ppc_irq_dispatch_handler(struct pt_regs *regs, int irq, | 491 | extern int call___do_IRQ(int irq, struct pt_regs *regs, |
492 | struct thread_info *tp); | 492 | struct thread_info *tp); |
493 | 493 | ||
494 | #define __ARCH_HAS_DO_SOFTIRQ | 494 | #define __ARCH_HAS_DO_SOFTIRQ |
495 | 495 | ||
496 | #else | 496 | #else |
497 | #define irq_ctx_init() | 497 | #define irq_ctx_init() |
498 | 498 | ||
499 | #endif /* CONFIG_IRQSTACKS */ | 499 | #endif /* CONFIG_IRQSTACKS */ |
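call___do_IRQ() is the declaration this commit renames: the assembly thunk now switches onto the per-cpu hard-irq stack and calls the generic __do_IRQ() rather than the removed ppc_irq_dispatch_handler(). Roughly how do_IRQ() uses it in this era's arch/powerpc/kernel/irq.c, abridged as a sketch:

	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];
	if (curtp != irqtp) {
		/* Borrow the current task and run the handler on the irq stack. */
		irqtp->task = curtp->task;
		irqtp->flags = 0;
		call___do_IRQ(irq, regs, irqtp);
		irqtp->task = NULL;
	} else {
		/* Already on the irq stack: call the generic handler directly. */
		__do_IRQ(irq, regs);
	}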
500 | 500 | ||
501 | extern void do_IRQ(struct pt_regs *regs); | 501 | extern void do_IRQ(struct pt_regs *regs); |
502 | 502 | ||
503 | #endif /* _ASM_IRQ_H */ | 503 | #endif /* _ASM_IRQ_H */ |
504 | #endif /* __KERNEL__ */ | 504 | #endif /* __KERNEL__ */ |
505 | 505 |