Commit d04c56f73c30a5e593202ecfcf25ed43d42363a2

Authored by Paul Mackerras
1 parent 284a940675

[POWERPC] Lazy interrupt disabling for 64-bit machines

This implements a lazy strategy for disabling interrupts.  This means
that local_irq_disable() et al. just clear the 'interrupts are
enabled' flag in the paca.  If an interrupt comes along, the interrupt
entry code notices that interrupts are supposed to be disabled, and
clears the EE bit in SRR1, clears the 'interrupts are hard-enabled'
flag in the paca, and returns.  As a result, interrupts only get
disabled in the processor when an interrupt actually arrives.
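
As an illustrative sketch only (not the literal patch, which does this with
inline assembly and in the exception entry paths), the disable side and the
behaviour on a masked interrupt look roughly like the C below.  The field
names soft_enabled and hard_enabled are the ones this patch introduces in the
paca; the struct, the function names and the bodies are simplified
pseudocode.

    #define MSR_EE 0x8000ul         /* External-interrupt Enable bit in the MSR */

    /* Simplified stand-in for the relevant paca fields */
    struct paca_sketch {
            unsigned char soft_enabled;     /* caller wants interrupts on? */
            unsigned char hard_enabled;     /* is MSR[EE] actually set?    */
    };

    static void sketch_local_irq_disable(struct paca_sketch *paca)
    {
            paca->soft_enabled = 0;         /* just record the intent...   */
            /* ...MSR[EE] is left alone, so no mtmsrd is needed here */
    }

    /* What interrupt entry does if an interrupt fires while soft-disabled */
    static void sketch_masked_interrupt(struct paca_sketch *paca,
                                        unsigned long *srr1)
    {
            *srr1 &= ~MSR_EE;               /* return with EE clear...     */
            paca->hard_enabled = 0;         /* ...and remember that we did */
            /* return from the exception without calling any handler */
    }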

When interrupts are enabled by local_irq_enable() et al., the code
sets the interrupts-enabled flag in the paca, and then checks whether
interrupts got hard-disabled.  If so, it also sets the EE bit in the
MSR to hard-enable the interrupts.
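
The enable side is the other half of the same sketch, under the same
assumptions; the helper hard_enable_interrupts() is hypothetical and stands
in for actually setting MSR[EE] again:

    extern void hard_enable_interrupts(void);  /* hypothetical: sets MSR[EE] */

    static void sketch_local_irq_enable(struct paca_sketch *paca)
    {
            paca->soft_enabled = 1;
            if (!paca->hard_enabled) {
                    /* an interrupt was taken while we were soft-disabled
                     * and left MSR[EE] clear; hard-enable the CPU again */
                    paca->hard_enabled = 1;
                    hard_enable_interrupts();
            }
    }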

This has the potential to improve performance, and also makes it
easier to make a kernel that can boot on iSeries and on other 64-bit
machines, since this lazy-disable strategy is very similar to the
soft-disable strategy that iSeries already uses.

This version renames paca->proc_enabled to paca->soft_enabled, and
changes a couple of soft-disables in the kexec code to hard-disables,
which should fix the crash that Michael Ellerman saw.  This doesn't
yet use a reserved CR field for the soft_enabled and hard_enabled
flags.  This applies on top of Stephen Rothwell's patches to make it
possible to build a combined iSeries/other kernel.

Signed-off-by: Paul Mackerras <paulus@samba.org>

Showing 12 changed files with 160 additions and 111 deletions

arch/powerpc/kernel/asm-offsets.c
1 /* 1 /*
2 * This program is used to generate definitions needed by 2 * This program is used to generate definitions needed by
3 * assembly language modules. 3 * assembly language modules.
4 * 4 *
5 * We use the technique used in the OSF Mach kernel code: 5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines, 6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the 7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output. 8 * #defines from the assembly-language output.
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 */ 14 */
15 15
16 #include <linux/signal.h> 16 #include <linux/signal.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/kernel.h> 18 #include <linux/kernel.h>
19 #include <linux/errno.h> 19 #include <linux/errno.h>
20 #include <linux/string.h> 20 #include <linux/string.h>
21 #include <linux/types.h> 21 #include <linux/types.h>
22 #include <linux/mman.h> 22 #include <linux/mman.h>
23 #include <linux/mm.h> 23 #include <linux/mm.h>
24 #ifdef CONFIG_PPC64 24 #ifdef CONFIG_PPC64
25 #include <linux/time.h> 25 #include <linux/time.h>
26 #include <linux/hardirq.h> 26 #include <linux/hardirq.h>
27 #else 27 #else
28 #include <linux/ptrace.h> 28 #include <linux/ptrace.h>
29 #include <linux/suspend.h> 29 #include <linux/suspend.h>
30 #endif 30 #endif
31 31
32 #include <asm/io.h> 32 #include <asm/io.h>
33 #include <asm/page.h> 33 #include <asm/page.h>
34 #include <asm/pgtable.h> 34 #include <asm/pgtable.h>
35 #include <asm/processor.h> 35 #include <asm/processor.h>
36 #include <asm/cputable.h> 36 #include <asm/cputable.h>
37 #include <asm/thread_info.h> 37 #include <asm/thread_info.h>
38 #include <asm/rtas.h> 38 #include <asm/rtas.h>
39 #include <asm/vdso_datapage.h> 39 #include <asm/vdso_datapage.h>
40 #ifdef CONFIG_PPC64 40 #ifdef CONFIG_PPC64
41 #include <asm/paca.h> 41 #include <asm/paca.h>
42 #include <asm/lppaca.h> 42 #include <asm/lppaca.h>
43 #include <asm/cache.h> 43 #include <asm/cache.h>
44 #include <asm/compat.h> 44 #include <asm/compat.h>
45 #include <asm/mmu.h> 45 #include <asm/mmu.h>
46 #include <asm/hvcall.h> 46 #include <asm/hvcall.h>
47 #endif 47 #endif
48 48
49 #define DEFINE(sym, val) \ 49 #define DEFINE(sym, val) \
50 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 50 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
51 51
52 #define BLANK() asm volatile("\n->" : : ) 52 #define BLANK() asm volatile("\n->" : : )
53 53
54 int main(void) 54 int main(void)
55 { 55 {
56 DEFINE(THREAD, offsetof(struct task_struct, thread)); 56 DEFINE(THREAD, offsetof(struct task_struct, thread));
57 DEFINE(MM, offsetof(struct task_struct, mm)); 57 DEFINE(MM, offsetof(struct task_struct, mm));
58 #ifdef CONFIG_PPC64 58 #ifdef CONFIG_PPC64
59 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); 59 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
60 #else 60 #else
61 DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); 61 DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
62 DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); 62 DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
63 #endif /* CONFIG_PPC64 */ 63 #endif /* CONFIG_PPC64 */
64 64
65 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 65 DEFINE(KSP, offsetof(struct thread_struct, ksp));
66 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 66 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
67 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); 67 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
68 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); 68 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
69 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); 69 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
70 #ifdef CONFIG_ALTIVEC 70 #ifdef CONFIG_ALTIVEC
71 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); 71 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
72 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); 72 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
73 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); 73 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
74 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); 74 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
75 #endif /* CONFIG_ALTIVEC */ 75 #endif /* CONFIG_ALTIVEC */
76 #ifdef CONFIG_PPC64 76 #ifdef CONFIG_PPC64
77 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); 77 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
78 #else /* CONFIG_PPC64 */ 78 #else /* CONFIG_PPC64 */
79 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); 79 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
80 DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall)); 80 DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
81 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 81 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
82 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); 82 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
83 DEFINE(PT_PTRACED, PT_PTRACED); 83 DEFINE(PT_PTRACED, PT_PTRACED);
84 #endif 84 #endif
85 #ifdef CONFIG_SPE 85 #ifdef CONFIG_SPE
86 DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); 86 DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
87 DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); 87 DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
88 DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); 88 DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
89 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); 89 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
90 #endif /* CONFIG_SPE */ 90 #endif /* CONFIG_SPE */
91 #endif /* CONFIG_PPC64 */ 91 #endif /* CONFIG_PPC64 */
92 92
93 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 93 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
94 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); 94 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
95 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 95 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
96 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 96 DEFINE(TI_TASK, offsetof(struct thread_info, task));
97 #ifdef CONFIG_PPC32 97 #ifdef CONFIG_PPC32
98 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); 98 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
99 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 99 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
100 #endif /* CONFIG_PPC32 */ 100 #endif /* CONFIG_PPC32 */
101 101
102 #ifdef CONFIG_PPC64 102 #ifdef CONFIG_PPC64
103 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size)); 103 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
104 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size)); 104 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
105 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page)); 105 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
106 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); 106 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
107 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); 107 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
108 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 108 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
109 /* paca */ 109 /* paca */
110 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 110 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
111 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); 111 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
112 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); 112 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
113 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); 113 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
114 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); 114 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
115 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); 115 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
116 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real)); 116 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
117 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr)); 117 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
118 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); 118 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
119 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); 119 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
120 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); 120 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
121 DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled)); 121 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
122 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
122 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); 123 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
123 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); 124 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
124 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); 125 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
125 DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp)); 126 DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
126 DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); 127 DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
127 #ifdef CONFIG_HUGETLB_PAGE 128 #ifdef CONFIG_HUGETLB_PAGE
128 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); 129 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
129 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); 130 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
130 #endif /* CONFIG_HUGETLB_PAGE */ 131 #endif /* CONFIG_HUGETLB_PAGE */
131 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); 132 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
132 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); 133 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
133 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); 134 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
134 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); 135 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
135 DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); 136 DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
136 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); 137 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
137 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); 138 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
138 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); 139 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
139 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 140 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
140 DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); 141 DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
141 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); 142 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
142 143
143 DEFINE(SLBSHADOW_STACKVSID, 144 DEFINE(SLBSHADOW_STACKVSID,
144 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); 145 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
145 DEFINE(SLBSHADOW_STACKESID, 146 DEFINE(SLBSHADOW_STACKESID,
146 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); 147 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
147 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); 148 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
148 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); 149 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
149 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); 150 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
150 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); 151 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
151 DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); 152 DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
152 #endif /* CONFIG_PPC64 */ 153 #endif /* CONFIG_PPC64 */
153 154
154 /* RTAS */ 155 /* RTAS */
155 DEFINE(RTASBASE, offsetof(struct rtas_t, base)); 156 DEFINE(RTASBASE, offsetof(struct rtas_t, base));
156 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); 157 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
157 158
158 /* Interrupt register frame */ 159 /* Interrupt register frame */
159 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); 160 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
160 #ifndef CONFIG_PPC64 161 #ifndef CONFIG_PPC64
161 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); 162 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
162 #else /* CONFIG_PPC64 */ 163 #else /* CONFIG_PPC64 */
163 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); 164 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
164 /* 288 = # of volatile regs, int & fp, for leaf routines */ 165 /* 288 = # of volatile regs, int & fp, for leaf routines */
165 /* which do not stack a frame. See the PPC64 ABI. */ 166 /* which do not stack a frame. See the PPC64 ABI. */
166 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288); 167 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
167 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */ 168 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
168 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); 169 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
169 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); 170 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
170 171
171 /* hcall statistics */ 172 /* hcall statistics */
172 DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats)); 173 DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats));
173 DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls)); 174 DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls));
174 DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total)); 175 DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total));
175 DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total)); 176 DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total));
176 #endif /* CONFIG_PPC64 */ 177 #endif /* CONFIG_PPC64 */
177 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); 178 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
178 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); 179 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
179 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); 180 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
180 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); 181 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
181 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); 182 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
182 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); 183 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
183 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); 184 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
184 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); 185 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
185 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); 186 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
186 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); 187 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
187 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); 188 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
188 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); 189 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
189 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); 190 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
190 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); 191 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
191 #ifndef CONFIG_PPC64 192 #ifndef CONFIG_PPC64
192 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); 193 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
193 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15])); 194 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
194 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16])); 195 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
195 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17])); 196 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
196 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18])); 197 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
197 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19])); 198 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
198 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20])); 199 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
199 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21])); 200 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
200 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22])); 201 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
201 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23])); 202 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
202 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24])); 203 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
203 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25])); 204 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
204 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26])); 205 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
205 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27])); 206 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
206 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28])); 207 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
207 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29])); 208 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
208 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30])); 209 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
209 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31])); 210 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
210 #endif /* CONFIG_PPC64 */ 211 #endif /* CONFIG_PPC64 */
211 /* 212 /*
212 * Note: these symbols include _ because they overlap with special 213 * Note: these symbols include _ because they overlap with special
213 * register names 214 * register names
214 */ 215 */
215 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); 216 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
216 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); 217 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
217 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); 218 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
218 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); 219 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
219 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); 220 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
220 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); 221 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
221 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); 222 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
222 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); 223 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
223 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); 224 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
224 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); 225 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
225 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); 226 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
226 #ifndef CONFIG_PPC64 227 #ifndef CONFIG_PPC64
227 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq)); 228 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
228 /* 229 /*
229 * The PowerPC 400-class & Book-E processors have neither the DAR 230 * The PowerPC 400-class & Book-E processors have neither the DAR
230 * nor the DSISR SPRs. Hence, we overload them to hold the similar 231 * nor the DSISR SPRs. Hence, we overload them to hold the similar
231 * DEAR and ESR SPRs for such processors. For critical interrupts 232 * DEAR and ESR SPRs for such processors. For critical interrupts
232 * we use them to hold SRR0 and SRR1. 233 * we use them to hold SRR0 and SRR1.
233 */ 234 */
234 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); 235 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
235 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); 236 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
236 #else /* CONFIG_PPC64 */ 237 #else /* CONFIG_PPC64 */
237 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); 238 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
238 239
239 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ 240 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
240 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); 241 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
241 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); 242 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
242 #endif /* CONFIG_PPC64 */ 243 #endif /* CONFIG_PPC64 */
243 244
244 DEFINE(CLONE_VM, CLONE_VM); 245 DEFINE(CLONE_VM, CLONE_VM);
245 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); 246 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
246 247
247 #ifndef CONFIG_PPC64 248 #ifndef CONFIG_PPC64
248 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); 249 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
249 #endif /* ! CONFIG_PPC64 */ 250 #endif /* ! CONFIG_PPC64 */
250 251
251 /* About the CPU features table */ 252 /* About the CPU features table */
252 DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec)); 253 DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
253 DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask)); 254 DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
254 DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value)); 255 DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
255 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); 256 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
256 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); 257 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
257 DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); 258 DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
258 259
259 #ifndef CONFIG_PPC64 260 #ifndef CONFIG_PPC64
260 DEFINE(pbe_address, offsetof(struct pbe, address)); 261 DEFINE(pbe_address, offsetof(struct pbe, address));
261 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); 262 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
262 DEFINE(pbe_next, offsetof(struct pbe, next)); 263 DEFINE(pbe_next, offsetof(struct pbe, next));
263 264
264 DEFINE(TASK_SIZE, TASK_SIZE); 265 DEFINE(TASK_SIZE, TASK_SIZE);
265 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); 266 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
266 #endif /* ! CONFIG_PPC64 */ 267 #endif /* ! CONFIG_PPC64 */
267 268
268 /* datapage offsets for use by vdso */ 269 /* datapage offsets for use by vdso */
269 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); 270 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
270 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); 271 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
271 DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); 272 DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
272 DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec)); 273 DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec));
273 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); 274 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
274 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); 275 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
275 DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); 276 DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
276 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); 277 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
277 DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 278 DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
278 DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 279 DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
279 #ifdef CONFIG_PPC64 280 #ifdef CONFIG_PPC64
280 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); 281 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64));
281 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); 282 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
282 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); 283 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
283 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); 284 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
284 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); 285 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
285 DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec)); 286 DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
286 DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec)); 287 DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
287 DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec)); 288 DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
288 DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec)); 289 DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
289 #else 290 #else
290 DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec)); 291 DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
291 DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec)); 292 DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
292 DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec)); 293 DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
293 DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec)); 294 DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
294 #endif 295 #endif
295 /* timeval/timezone offsets for use by vdso */ 296 /* timeval/timezone offsets for use by vdso */
296 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); 297 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
297 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); 298 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
298 299
299 /* Other bits used by the vdso */ 300 /* Other bits used by the vdso */
300 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); 301 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
301 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); 302 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
302 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); 303 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
303 DEFINE(CLOCK_REALTIME_RES, TICK_NSEC); 304 DEFINE(CLOCK_REALTIME_RES, TICK_NSEC);
304 305
305 return 0; 306 return 0;
306 } 307 }
307 308
arch/powerpc/kernel/crash.c
1 /* 1 /*
2 * Architecture specific (PPC64) functions for kexec based crash dumps. 2 * Architecture specific (PPC64) functions for kexec based crash dumps.
3 * 3 *
4 * Copyright (C) 2005, IBM Corp. 4 * Copyright (C) 2005, IBM Corp.
5 * 5 *
6 * Created by: Haren Myneni 6 * Created by: Haren Myneni
7 * 7 *
8 * This source code is licensed under the GNU General Public License, 8 * This source code is licensed under the GNU General Public License,
9 * Version 2. See the file COPYING for more details. 9 * Version 2. See the file COPYING for more details.
10 * 10 *
11 */ 11 */
12 12
13 #undef DEBUG 13 #undef DEBUG
14 14
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/smp.h> 16 #include <linux/smp.h>
17 #include <linux/reboot.h> 17 #include <linux/reboot.h>
18 #include <linux/kexec.h> 18 #include <linux/kexec.h>
19 #include <linux/bootmem.h> 19 #include <linux/bootmem.h>
20 #include <linux/crash_dump.h> 20 #include <linux/crash_dump.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/elf.h> 22 #include <linux/elf.h>
23 #include <linux/elfcore.h> 23 #include <linux/elfcore.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/irq.h> 25 #include <linux/irq.h>
26 #include <linux/types.h> 26 #include <linux/types.h>
27 #include <linux/irq.h> 27 #include <linux/irq.h>
28 28
29 #include <asm/processor.h> 29 #include <asm/processor.h>
30 #include <asm/machdep.h> 30 #include <asm/machdep.h>
31 #include <asm/kexec.h> 31 #include <asm/kexec.h>
32 #include <asm/kdump.h> 32 #include <asm/kdump.h>
33 #include <asm/lmb.h> 33 #include <asm/lmb.h>
34 #include <asm/firmware.h> 34 #include <asm/firmware.h>
35 #include <asm/smp.h> 35 #include <asm/smp.h>
36 36
37 #ifdef DEBUG 37 #ifdef DEBUG
38 #include <asm/udbg.h> 38 #include <asm/udbg.h>
39 #define DBG(fmt...) udbg_printf(fmt) 39 #define DBG(fmt...) udbg_printf(fmt)
40 #else 40 #else
41 #define DBG(fmt...) 41 #define DBG(fmt...)
42 #endif 42 #endif
43 43
44 /* This keeps a track of which one is crashing cpu. */ 44 /* This keeps a track of which one is crashing cpu. */
45 int crashing_cpu = -1; 45 int crashing_cpu = -1;
46 static cpumask_t cpus_in_crash = CPU_MASK_NONE; 46 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
47 cpumask_t cpus_in_sr = CPU_MASK_NONE; 47 cpumask_t cpus_in_sr = CPU_MASK_NONE;
48 48
49 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, 49 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
50 size_t data_len) 50 size_t data_len)
51 { 51 {
52 struct elf_note note; 52 struct elf_note note;
53 53
54 note.n_namesz = strlen(name) + 1; 54 note.n_namesz = strlen(name) + 1;
55 note.n_descsz = data_len; 55 note.n_descsz = data_len;
56 note.n_type = type; 56 note.n_type = type;
57 memcpy(buf, &note, sizeof(note)); 57 memcpy(buf, &note, sizeof(note));
58 buf += (sizeof(note) +3)/4; 58 buf += (sizeof(note) +3)/4;
59 memcpy(buf, name, note.n_namesz); 59 memcpy(buf, name, note.n_namesz);
60 buf += (note.n_namesz + 3)/4; 60 buf += (note.n_namesz + 3)/4;
61 memcpy(buf, data, note.n_descsz); 61 memcpy(buf, data, note.n_descsz);
62 buf += (note.n_descsz + 3)/4; 62 buf += (note.n_descsz + 3)/4;
63 63
64 return buf; 64 return buf;
65 } 65 }
66 66
67 static void final_note(u32 *buf) 67 static void final_note(u32 *buf)
68 { 68 {
69 struct elf_note note; 69 struct elf_note note;
70 70
71 note.n_namesz = 0; 71 note.n_namesz = 0;
72 note.n_descsz = 0; 72 note.n_descsz = 0;
73 note.n_type = 0; 73 note.n_type = 0;
74 memcpy(buf, &note, sizeof(note)); 74 memcpy(buf, &note, sizeof(note));
75 } 75 }
76 76
77 static void crash_save_this_cpu(struct pt_regs *regs, int cpu) 77 static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
78 { 78 {
79 struct elf_prstatus prstatus; 79 struct elf_prstatus prstatus;
80 u32 *buf; 80 u32 *buf;
81 81
82 if ((cpu < 0) || (cpu >= NR_CPUS)) 82 if ((cpu < 0) || (cpu >= NR_CPUS))
83 return; 83 return;
84 84
85 /* Using ELF notes here is opportunistic. 85 /* Using ELF notes here is opportunistic.
86 * I need a well defined structure format 86 * I need a well defined structure format
87 * for the data I pass, and I need tags 87 * for the data I pass, and I need tags
88 * on the data to indicate what information I have 88 * on the data to indicate what information I have
89 * squirrelled away. ELF notes happen to provide 89 * squirrelled away. ELF notes happen to provide
90 * all of that that no need to invent something new. 90 * all of that that no need to invent something new.
91 */ 91 */
92 buf = (u32*)per_cpu_ptr(crash_notes, cpu); 92 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
93 if (!buf) 93 if (!buf)
94 return; 94 return;
95 95
96 memset(&prstatus, 0, sizeof(prstatus)); 96 memset(&prstatus, 0, sizeof(prstatus));
97 prstatus.pr_pid = current->pid; 97 prstatus.pr_pid = current->pid;
98 elf_core_copy_regs(&prstatus.pr_reg, regs); 98 elf_core_copy_regs(&prstatus.pr_reg, regs);
99 buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, 99 buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
100 sizeof(prstatus)); 100 sizeof(prstatus));
101 final_note(buf); 101 final_note(buf);
102 } 102 }
103 103
104 #ifdef CONFIG_SMP 104 #ifdef CONFIG_SMP
105 static atomic_t enter_on_soft_reset = ATOMIC_INIT(0); 105 static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
106 106
107 void crash_ipi_callback(struct pt_regs *regs) 107 void crash_ipi_callback(struct pt_regs *regs)
108 { 108 {
109 int cpu = smp_processor_id(); 109 int cpu = smp_processor_id();
110 110
111 if (!cpu_online(cpu)) 111 if (!cpu_online(cpu))
112 return; 112 return;
113 113
114 local_irq_disable(); 114 hard_irq_disable();
115 if (!cpu_isset(cpu, cpus_in_crash)) 115 if (!cpu_isset(cpu, cpus_in_crash))
116 crash_save_this_cpu(regs, cpu); 116 crash_save_this_cpu(regs, cpu);
117 cpu_set(cpu, cpus_in_crash); 117 cpu_set(cpu, cpus_in_crash);
118 118
119 /* 119 /*
120 * Entered via soft-reset - could be the kdump 120 * Entered via soft-reset - could be the kdump
121 * process is invoked using soft-reset or user activated 121 * process is invoked using soft-reset or user activated
122 * it if some CPU did not respond to an IPI. 122 * it if some CPU did not respond to an IPI.
123 * For soft-reset, the secondary CPU can enter this func 123 * For soft-reset, the secondary CPU can enter this func
124 * twice. 1 - using IPI, and 2. soft-reset. 124 * twice. 1 - using IPI, and 2. soft-reset.
125 * Tell the kexec CPU that entered via soft-reset and ready 125 * Tell the kexec CPU that entered via soft-reset and ready
126 * to go down. 126 * to go down.
127 */ 127 */
128 if (cpu_isset(cpu, cpus_in_sr)) { 128 if (cpu_isset(cpu, cpus_in_sr)) {
129 cpu_clear(cpu, cpus_in_sr); 129 cpu_clear(cpu, cpus_in_sr);
130 atomic_inc(&enter_on_soft_reset); 130 atomic_inc(&enter_on_soft_reset);
131 } 131 }
132 132
133 /* 133 /*
134 * Starting the kdump boot. 134 * Starting the kdump boot.
135 * This barrier is needed to make sure that all CPUs are stopped. 135 * This barrier is needed to make sure that all CPUs are stopped.
136 * If not, soft-reset will be invoked to bring other CPUs. 136 * If not, soft-reset will be invoked to bring other CPUs.
137 */ 137 */
138 while (!cpu_isset(crashing_cpu, cpus_in_crash)) 138 while (!cpu_isset(crashing_cpu, cpus_in_crash))
139 cpu_relax(); 139 cpu_relax();
140 140
141 if (ppc_md.kexec_cpu_down) 141 if (ppc_md.kexec_cpu_down)
142 ppc_md.kexec_cpu_down(1, 1); 142 ppc_md.kexec_cpu_down(1, 1);
143 143
144 #ifdef CONFIG_PPC64 144 #ifdef CONFIG_PPC64
145 kexec_smp_wait(); 145 kexec_smp_wait();
146 #else 146 #else
147 for (;;); /* FIXME */ 147 for (;;); /* FIXME */
148 #endif 148 #endif
149 149
150 /* NOTREACHED */ 150 /* NOTREACHED */
151 } 151 }
152 152
153 /* 153 /*
154 * Wait until all CPUs are entered via soft-reset. 154 * Wait until all CPUs are entered via soft-reset.
155 */ 155 */
156 static void crash_soft_reset_check(int cpu) 156 static void crash_soft_reset_check(int cpu)
157 { 157 {
158 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ 158 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
159 159
160 cpu_clear(cpu, cpus_in_sr); 160 cpu_clear(cpu, cpus_in_sr);
161 while (atomic_read(&enter_on_soft_reset) != ncpus) 161 while (atomic_read(&enter_on_soft_reset) != ncpus)
162 cpu_relax(); 162 cpu_relax();
163 } 163 }
164 164
165 165
166 static void crash_kexec_prepare_cpus(int cpu) 166 static void crash_kexec_prepare_cpus(int cpu)
167 { 167 {
168 unsigned int msecs; 168 unsigned int msecs;
169 169
170 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ 170 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
171 171
172 crash_send_ipi(crash_ipi_callback); 172 crash_send_ipi(crash_ipi_callback);
173 smp_wmb(); 173 smp_wmb();
174 174
175 /* 175 /*
176 * FIXME: Until we will have the way to stop other CPUSs reliabally, 176 * FIXME: Until we will have the way to stop other CPUSs reliabally,
177 * the crash CPU will send an IPI and wait for other CPUs to 177 * the crash CPU will send an IPI and wait for other CPUs to
178 * respond. 178 * respond.
179 * Delay of at least 10 seconds. 179 * Delay of at least 10 seconds.
180 */ 180 */
181 printk(KERN_EMERG "Sending IPI to other cpus...\n"); 181 printk(KERN_EMERG "Sending IPI to other cpus...\n");
182 msecs = 10000; 182 msecs = 10000;
183 while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { 183 while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
184 cpu_relax(); 184 cpu_relax();
185 mdelay(1); 185 mdelay(1);
186 } 186 }
187 187
188 /* Would it be better to replace the trap vector here? */ 188 /* Would it be better to replace the trap vector here? */
189 189
190 /* 190 /*
191 * FIXME: In case if we do not get all CPUs, one possibility: ask the 191 * FIXME: In case if we do not get all CPUs, one possibility: ask the
192 * user to do soft reset such that we get all. 192 * user to do soft reset such that we get all.
193 * Soft-reset will be used until better mechanism is implemented. 193 * Soft-reset will be used until better mechanism is implemented.
194 */ 194 */
195 if (cpus_weight(cpus_in_crash) < ncpus) { 195 if (cpus_weight(cpus_in_crash) < ncpus) {
196 printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", 196 printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
197 ncpus - cpus_weight(cpus_in_crash)); 197 ncpus - cpus_weight(cpus_in_crash));
198 printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); 198 printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
199 cpus_in_sr = CPU_MASK_NONE; 199 cpus_in_sr = CPU_MASK_NONE;
200 atomic_set(&enter_on_soft_reset, 0); 200 atomic_set(&enter_on_soft_reset, 0);
201 while (cpus_weight(cpus_in_crash) < ncpus) 201 while (cpus_weight(cpus_in_crash) < ncpus)
202 cpu_relax(); 202 cpu_relax();
203 } 203 }
204 /* 204 /*
205 * Make sure all CPUs are entered via soft-reset if the kdump is 205 * Make sure all CPUs are entered via soft-reset if the kdump is
206 * invoked using soft-reset. 206 * invoked using soft-reset.
207 */ 207 */
208 if (cpu_isset(cpu, cpus_in_sr)) 208 if (cpu_isset(cpu, cpus_in_sr))
209 crash_soft_reset_check(cpu); 209 crash_soft_reset_check(cpu);
210 /* Leave the IPI callback set */ 210 /* Leave the IPI callback set */
211 } 211 }
212 212
213 /* 213 /*
214 * This function will be called by secondary cpus or by kexec cpu 214 * This function will be called by secondary cpus or by kexec cpu
215 * if soft-reset is activated to stop some CPUs. 215 * if soft-reset is activated to stop some CPUs.
216 */ 216 */
217 void crash_kexec_secondary(struct pt_regs *regs) 217 void crash_kexec_secondary(struct pt_regs *regs)
218 { 218 {
219 int cpu = smp_processor_id(); 219 int cpu = smp_processor_id();
220 unsigned long flags; 220 unsigned long flags;
221 int msecs = 5; 221 int msecs = 5;
222 222
223 local_irq_save(flags); 223 local_irq_save(flags);
224 /* Wait 5ms if the kexec CPU is not entered yet. */ 224 /* Wait 5ms if the kexec CPU is not entered yet. */
225 while (crashing_cpu < 0) { 225 while (crashing_cpu < 0) {
226 if (--msecs < 0) { 226 if (--msecs < 0) {
227 /* 227 /*
228 * Either kdump image is not loaded or 228 * Either kdump image is not loaded or
229 * kdump process is not started - Probably xmon 229 * kdump process is not started - Probably xmon
230 * exited using 'x'(exit and recover) or 230 * exited using 'x'(exit and recover) or
231 * kexec_should_crash() failed for all running tasks. 231 * kexec_should_crash() failed for all running tasks.
232 */ 232 */
233 cpu_clear(cpu, cpus_in_sr); 233 cpu_clear(cpu, cpus_in_sr);
234 local_irq_restore(flags); 234 local_irq_restore(flags);
235 return; 235 return;
236 } 236 }
237 mdelay(1); 237 mdelay(1);
238 cpu_relax(); 238 cpu_relax();
239 } 239 }
240 if (cpu == crashing_cpu) { 240 if (cpu == crashing_cpu) {
241 /* 241 /*
242 * Panic CPU will enter this func only via soft-reset. 242 * Panic CPU will enter this func only via soft-reset.
243 * Wait until all secondary CPUs entered and 243 * Wait until all secondary CPUs entered and
244 * then start kexec boot. 244 * then start kexec boot.
245 */ 245 */
246 crash_soft_reset_check(cpu); 246 crash_soft_reset_check(cpu);
247 cpu_set(crashing_cpu, cpus_in_crash); 247 cpu_set(crashing_cpu, cpus_in_crash);
248 if (ppc_md.kexec_cpu_down) 248 if (ppc_md.kexec_cpu_down)
249 ppc_md.kexec_cpu_down(1, 0); 249 ppc_md.kexec_cpu_down(1, 0);
250 machine_kexec(kexec_crash_image); 250 machine_kexec(kexec_crash_image);
251 /* NOTREACHED */ 251 /* NOTREACHED */
252 } 252 }
253 crash_ipi_callback(regs); 253 crash_ipi_callback(regs);
254 } 254 }
255 255
256 #else 256 #else
257 static void crash_kexec_prepare_cpus(int cpu) 257 static void crash_kexec_prepare_cpus(int cpu)
258 { 258 {
259 /* 259 /*
260 * move the secondarys to us so that we can copy 260 * move the secondarys to us so that we can copy
261 * the new kernel 0-0x100 safely 261 * the new kernel 0-0x100 safely
262 * 262 *
263 * do this if kexec in setup.c ? 263 * do this if kexec in setup.c ?
264 */ 264 */
265 #ifdef CONFIG_PPC64 265 #ifdef CONFIG_PPC64
266 smp_release_cpus(); 266 smp_release_cpus();
267 #else 267 #else
268 /* FIXME */ 268 /* FIXME */
269 #endif 269 #endif
270 } 270 }
271 271
272 void crash_kexec_secondary(struct pt_regs *regs) 272 void crash_kexec_secondary(struct pt_regs *regs)
273 { 273 {
274 cpus_in_sr = CPU_MASK_NONE; 274 cpus_in_sr = CPU_MASK_NONE;
275 } 275 }
276 #endif 276 #endif
277 277
278 void default_machine_crash_shutdown(struct pt_regs *regs) 278 void default_machine_crash_shutdown(struct pt_regs *regs)
279 { 279 {
280 unsigned int irq; 280 unsigned int irq;
281 281
282 /* 282 /*
283 * This function is only called after the system 283 * This function is only called after the system
284 * has panicked or is otherwise in a critical state. 284 * has panicked or is otherwise in a critical state.
285 * The minimum amount of code to allow a kexec'd kernel 285 * The minimum amount of code to allow a kexec'd kernel
286 * to run successfully needs to happen here. 286 * to run successfully needs to happen here.
287 * 287 *
288 * In practice this means stopping other cpus in 288 * In practice this means stopping other cpus in
289 * an SMP system. 289 * an SMP system.
290 * The kernel is broken so disable interrupts. 290 * The kernel is broken so disable interrupts.
291 */ 291 */
292 local_irq_disable(); 292 hard_irq_disable();
293 293
294 for_each_irq(irq) { 294 for_each_irq(irq) {
295 struct irq_desc *desc = irq_desc + irq; 295 struct irq_desc *desc = irq_desc + irq;
296 296
297 if (desc->status & IRQ_INPROGRESS) 297 if (desc->status & IRQ_INPROGRESS)
298 desc->chip->eoi(irq); 298 desc->chip->eoi(irq);
299 299
300 if (!(desc->status & IRQ_DISABLED)) 300 if (!(desc->status & IRQ_DISABLED))
301 desc->chip->disable(irq); 301 desc->chip->disable(irq);
302 } 302 }
303 303
304 /* 304 /*
305 * Make a note of crashing cpu. Will be used in machine_kexec 305 * Make a note of crashing cpu. Will be used in machine_kexec
306 * such that another IPI will not be sent. 306 * such that another IPI will not be sent.
307 */ 307 */
308 crashing_cpu = smp_processor_id(); 308 crashing_cpu = smp_processor_id();
309 crash_save_this_cpu(regs, crashing_cpu); 309 crash_save_this_cpu(regs, crashing_cpu);
310 crash_kexec_prepare_cpus(crashing_cpu); 310 crash_kexec_prepare_cpus(crashing_cpu);
311 cpu_set(crashing_cpu, cpus_in_crash); 311 cpu_set(crashing_cpu, cpus_in_crash);
312 if (ppc_md.kexec_cpu_down) 312 if (ppc_md.kexec_cpu_down)
313 ppc_md.kexec_cpu_down(1, 0); 313 ppc_md.kexec_cpu_down(1, 0);
314 } 314 }
315 315
arch/powerpc/kernel/entry_64.S
1 /* 1 /*
2 * PowerPC version 2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP 4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> 5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras. 6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support 7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras. 8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras. 9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). 10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 * 11 *
12 * This file contains the system call entry code, context switch 12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC. 13 * code, and exception/interrupt return code for PowerPC.
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License 16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version. 18 * 2 of the License, or (at your option) any later version.
19 */ 19 */
20 20
21 #include <linux/errno.h> 21 #include <linux/errno.h>
22 #include <asm/unistd.h> 22 #include <asm/unistd.h>
23 #include <asm/processor.h> 23 #include <asm/processor.h>
24 #include <asm/page.h> 24 #include <asm/page.h>
25 #include <asm/mmu.h> 25 #include <asm/mmu.h>
26 #include <asm/thread_info.h> 26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h> 27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h> 28 #include <asm/asm-offsets.h>
29 #include <asm/cputable.h> 29 #include <asm/cputable.h>
30 #include <asm/firmware.h> 30 #include <asm/firmware.h>
31 31
32 /* 32 /*
33 * System calls. 33 * System calls.
34 */ 34 */
35 .section ".toc","aw" 35 .section ".toc","aw"
36 .SYS_CALL_TABLE: 36 .SYS_CALL_TABLE:
37 .tc .sys_call_table[TC],.sys_call_table 37 .tc .sys_call_table[TC],.sys_call_table
38 38
39 /* This value is used to mark exception frames on the stack. */ 39 /* This value is used to mark exception frames on the stack. */
40 exception_marker: 40 exception_marker:
41 .tc ID_72656773_68657265[TC],0x7265677368657265 41 .tc ID_72656773_68657265[TC],0x7265677368657265
42 42
43 .section ".text" 43 .section ".text"
44 .align 7 44 .align 7
45 45
46 #undef SHOW_SYSCALLS 46 #undef SHOW_SYSCALLS
47 47
48 .globl system_call_common 48 .globl system_call_common
49 system_call_common: 49 system_call_common:
50 andi. r10,r12,MSR_PR 50 andi. r10,r12,MSR_PR
51 mr r10,r1 51 mr r10,r1
52 addi r1,r1,-INT_FRAME_SIZE 52 addi r1,r1,-INT_FRAME_SIZE
53 beq- 1f 53 beq- 1f
54 ld r1,PACAKSAVE(r13) 54 ld r1,PACAKSAVE(r13)
55 1: std r10,0(r1) 55 1: std r10,0(r1)
56 crclr so 56 crclr so
57 std r11,_NIP(r1) 57 std r11,_NIP(r1)
58 std r12,_MSR(r1) 58 std r12,_MSR(r1)
59 std r0,GPR0(r1) 59 std r0,GPR0(r1)
60 std r10,GPR1(r1) 60 std r10,GPR1(r1)
61 ACCOUNT_CPU_USER_ENTRY(r10, r11) 61 ACCOUNT_CPU_USER_ENTRY(r10, r11)
62 std r2,GPR2(r1) 62 std r2,GPR2(r1)
63 std r3,GPR3(r1) 63 std r3,GPR3(r1)
64 std r4,GPR4(r1) 64 std r4,GPR4(r1)
65 std r5,GPR5(r1) 65 std r5,GPR5(r1)
66 std r6,GPR6(r1) 66 std r6,GPR6(r1)
67 std r7,GPR7(r1) 67 std r7,GPR7(r1)
68 std r8,GPR8(r1) 68 std r8,GPR8(r1)
69 li r11,0 69 li r11,0
70 std r11,GPR9(r1) 70 std r11,GPR9(r1)
71 std r11,GPR10(r1) 71 std r11,GPR10(r1)
72 std r11,GPR11(r1) 72 std r11,GPR11(r1)
73 std r11,GPR12(r1) 73 std r11,GPR12(r1)
74 std r9,GPR13(r1) 74 std r9,GPR13(r1)
75 mfcr r9 75 mfcr r9
76 mflr r10 76 mflr r10
77 li r11,0xc01 77 li r11,0xc01
78 std r9,_CCR(r1) 78 std r9,_CCR(r1)
79 std r10,_LINK(r1) 79 std r10,_LINK(r1)
80 std r11,_TRAP(r1) 80 std r11,_TRAP(r1)
81 mfxer r9 81 mfxer r9
82 mfctr r10 82 mfctr r10
83 std r9,_XER(r1) 83 std r9,_XER(r1)
84 std r10,_CTR(r1) 84 std r10,_CTR(r1)
85 std r3,ORIG_GPR3(r1) 85 std r3,ORIG_GPR3(r1)
86 ld r2,PACATOC(r13) 86 ld r2,PACATOC(r13)
87 addi r9,r1,STACK_FRAME_OVERHEAD 87 addi r9,r1,STACK_FRAME_OVERHEAD
88 ld r11,exception_marker@toc(r2) 88 ld r11,exception_marker@toc(r2)
89 std r11,-16(r9) /* "regshere" marker */ 89 std r11,-16(r9) /* "regshere" marker */
90 li r10,1
91 stb r10,PACASOFTIRQEN(r13)
92 stb r10,PACAHARDIRQEN(r13)
93 std r10,SOFTE(r1)
90 #ifdef CONFIG_PPC_ISERIES 94 #ifdef CONFIG_PPC_ISERIES
91 BEGIN_FW_FTR_SECTION 95 BEGIN_FW_FTR_SECTION
92 /* Hack for handling interrupts when soft-enabling on iSeries */ 96 /* Hack for handling interrupts when soft-enabling on iSeries */
93 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ 97 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
94 andi. r10,r12,MSR_PR /* from kernel */ 98 andi. r10,r12,MSR_PR /* from kernel */
95 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq 99 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
96 beq hardware_interrupt_entry 100 beq hardware_interrupt_entry
97 lbz r10,PACAPROCENABLED(r13)
98 std r10,SOFTE(r1)
99 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 101 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
100 #endif 102 #endif
101 mfmsr r11 103 mfmsr r11
102 ori r11,r11,MSR_EE 104 ori r11,r11,MSR_EE
103 mtmsrd r11,1 105 mtmsrd r11,1
104 106
105 #ifdef SHOW_SYSCALLS 107 #ifdef SHOW_SYSCALLS
106 bl .do_show_syscall 108 bl .do_show_syscall
107 REST_GPR(0,r1) 109 REST_GPR(0,r1)
108 REST_4GPRS(3,r1) 110 REST_4GPRS(3,r1)
109 REST_2GPRS(7,r1) 111 REST_2GPRS(7,r1)
110 addi r9,r1,STACK_FRAME_OVERHEAD 112 addi r9,r1,STACK_FRAME_OVERHEAD
111 #endif 113 #endif
112 clrrdi r11,r1,THREAD_SHIFT 114 clrrdi r11,r1,THREAD_SHIFT
113 ld r10,TI_FLAGS(r11) 115 ld r10,TI_FLAGS(r11)
114 andi. r11,r10,_TIF_SYSCALL_T_OR_A 116 andi. r11,r10,_TIF_SYSCALL_T_OR_A
115 bne- syscall_dotrace 117 bne- syscall_dotrace
116 syscall_dotrace_cont: 118 syscall_dotrace_cont:
117 cmpldi 0,r0,NR_syscalls 119 cmpldi 0,r0,NR_syscalls
118 bge- syscall_enosys 120 bge- syscall_enosys
119 121
120 system_call: /* label this so stack traces look sane */ 122 system_call: /* label this so stack traces look sane */
121 /* 123 /*
122 * Need to vector to 32 Bit or default sys_call_table here, 124 * Need to vector to 32 Bit or default sys_call_table here,
123 * based on caller's run-mode / personality. 125 * based on caller's run-mode / personality.
124 */ 126 */
125 ld r11,.SYS_CALL_TABLE@toc(2) 127 ld r11,.SYS_CALL_TABLE@toc(2)
126 andi. r10,r10,_TIF_32BIT 128 andi. r10,r10,_TIF_32BIT
127 beq 15f 129 beq 15f
128 addi r11,r11,8 /* use 32-bit syscall entries */ 130 addi r11,r11,8 /* use 32-bit syscall entries */
129 clrldi r3,r3,32 131 clrldi r3,r3,32
130 clrldi r4,r4,32 132 clrldi r4,r4,32
131 clrldi r5,r5,32 133 clrldi r5,r5,32
132 clrldi r6,r6,32 134 clrldi r6,r6,32
133 clrldi r7,r7,32 135 clrldi r7,r7,32
134 clrldi r8,r8,32 136 clrldi r8,r8,32
135 15: 137 15:
136 slwi r0,r0,4 138 slwi r0,r0,4
137 ldx r10,r11,r0 /* Fetch system call handler [ptr] */ 139 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
138 mtctr r10 140 mtctr r10
139 bctrl /* Call handler */ 141 bctrl /* Call handler */
140 142
141 syscall_exit: 143 syscall_exit:
142 std r3,RESULT(r1) 144 std r3,RESULT(r1)
143 #ifdef SHOW_SYSCALLS 145 #ifdef SHOW_SYSCALLS
144 bl .do_show_syscall_exit 146 bl .do_show_syscall_exit
145 ld r3,RESULT(r1) 147 ld r3,RESULT(r1)
146 #endif 148 #endif
147 clrrdi r12,r1,THREAD_SHIFT 149 clrrdi r12,r1,THREAD_SHIFT
148 150
149 /* disable interrupts so current_thread_info()->flags can't change, 151 /* disable interrupts so current_thread_info()->flags can't change,
150 and so that we don't get interrupted after loading SRR0/1. */ 152 and so that we don't get interrupted after loading SRR0/1. */
151 ld r8,_MSR(r1) 153 ld r8,_MSR(r1)
152 andi. r10,r8,MSR_RI 154 andi. r10,r8,MSR_RI
153 beq- unrecov_restore 155 beq- unrecov_restore
154 mfmsr r10 156 mfmsr r10
155 rldicl r10,r10,48,1 157 rldicl r10,r10,48,1
156 rotldi r10,r10,16 158 rotldi r10,r10,16
157 mtmsrd r10,1 159 mtmsrd r10,1
158 ld r9,TI_FLAGS(r12) 160 ld r9,TI_FLAGS(r12)
159 li r11,-_LAST_ERRNO 161 li r11,-_LAST_ERRNO
160 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) 162 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
161 bne- syscall_exit_work 163 bne- syscall_exit_work
162 cmpld r3,r11 164 cmpld r3,r11
163 ld r5,_CCR(r1) 165 ld r5,_CCR(r1)
164 bge- syscall_error 166 bge- syscall_error
165 syscall_error_cont: 167 syscall_error_cont:
166 ld r7,_NIP(r1) 168 ld r7,_NIP(r1)
167 stdcx. r0,0,r1 /* to clear the reservation */ 169 stdcx. r0,0,r1 /* to clear the reservation */
168 andi. r6,r8,MSR_PR 170 andi. r6,r8,MSR_PR
169 ld r4,_LINK(r1) 171 ld r4,_LINK(r1)
170 beq- 1f 172 beq- 1f
171 ACCOUNT_CPU_USER_EXIT(r11, r12) 173 ACCOUNT_CPU_USER_EXIT(r11, r12)
172 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ 174 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
173 1: ld r2,GPR2(r1) 175 1: ld r2,GPR2(r1)
174 li r12,MSR_RI 176 li r12,MSR_RI
175 andc r11,r10,r12 177 andc r11,r10,r12
176 mtmsrd r11,1 /* clear MSR.RI */ 178 mtmsrd r11,1 /* clear MSR.RI */
177 ld r1,GPR1(r1) 179 ld r1,GPR1(r1)
178 mtlr r4 180 mtlr r4
179 mtcr r5 181 mtcr r5
180 mtspr SPRN_SRR0,r7 182 mtspr SPRN_SRR0,r7
181 mtspr SPRN_SRR1,r8 183 mtspr SPRN_SRR1,r8
182 rfid 184 rfid
183 b . /* prevent speculative execution */ 185 b . /* prevent speculative execution */
184 186
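The mfmsr / rldicl r10,r10,48,1 / rotldi r10,r10,16 / mtmsrd r10,1 sequence in syscall_exit above (and again in ret_from_except_lite further down) clears only MSR_EE: rotating the 64-bit MSR left by 48 parks the EE bit (0x8000) in the most-significant position, the rldicl mask of 1 clears that single bit, and the second rotate by 16 brings every other bit back to its original place. A small standalone C check of that bit manipulation; the sample MSR value is arbitrary and only the MSR_EE constant is taken from the architecture:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EE 0x8000ULL                /* external interrupt enable */

    static uint64_t rotl64(uint64_t v, unsigned n)
    {
            return (v << n) | (v >> (64 - n));
    }

    /* Same effect as: rldicl r,r,48,1 ; rotldi r,r,16 */
    static uint64_t clear_ee_by_rotation(uint64_t msr)
    {
            msr = rotl64(msr, 48) & ~(1ULL << 63);  /* rldicl ...,48,1 */
            return rotl64(msr, 16);                 /* rotldi ...,16   */
    }

    int main(void)
    {
            uint64_t msr = 0x8000000000009032ULL;   /* arbitrary example */

            printf("%#llx -> %#llx (expected %#llx)\n",
                   (unsigned long long)msr,
                   (unsigned long long)clear_ee_by_rotation(msr),
                   (unsigned long long)(msr & ~MSR_EE));
            return 0;
    }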
185 syscall_error: 187 syscall_error:
186 oris r5,r5,0x1000 /* Set SO bit in CR */ 188 oris r5,r5,0x1000 /* Set SO bit in CR */
187 neg r3,r3 189 neg r3,r3
188 std r5,_CCR(r1) 190 std r5,_CCR(r1)
189 b syscall_error_cont 191 b syscall_error_cont
190 192
191 /* Traced system call support */ 193 /* Traced system call support */
192 syscall_dotrace: 194 syscall_dotrace:
193 bl .save_nvgprs 195 bl .save_nvgprs
194 addi r3,r1,STACK_FRAME_OVERHEAD 196 addi r3,r1,STACK_FRAME_OVERHEAD
195 bl .do_syscall_trace_enter 197 bl .do_syscall_trace_enter
196 ld r0,GPR0(r1) /* Restore original registers */ 198 ld r0,GPR0(r1) /* Restore original registers */
197 ld r3,GPR3(r1) 199 ld r3,GPR3(r1)
198 ld r4,GPR4(r1) 200 ld r4,GPR4(r1)
199 ld r5,GPR5(r1) 201 ld r5,GPR5(r1)
200 ld r6,GPR6(r1) 202 ld r6,GPR6(r1)
201 ld r7,GPR7(r1) 203 ld r7,GPR7(r1)
202 ld r8,GPR8(r1) 204 ld r8,GPR8(r1)
203 addi r9,r1,STACK_FRAME_OVERHEAD 205 addi r9,r1,STACK_FRAME_OVERHEAD
204 clrrdi r10,r1,THREAD_SHIFT 206 clrrdi r10,r1,THREAD_SHIFT
205 ld r10,TI_FLAGS(r10) 207 ld r10,TI_FLAGS(r10)
206 b syscall_dotrace_cont 208 b syscall_dotrace_cont
207 209
208 syscall_enosys: 210 syscall_enosys:
209 li r3,-ENOSYS 211 li r3,-ENOSYS
210 b syscall_exit 212 b syscall_exit
211 213
212 syscall_exit_work: 214 syscall_exit_work:
213 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr. 215 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
214 If TIF_NOERROR is set, just save r3 as it is. */ 216 If TIF_NOERROR is set, just save r3 as it is. */
215 217
216 andi. r0,r9,_TIF_RESTOREALL 218 andi. r0,r9,_TIF_RESTOREALL
217 beq+ 0f 219 beq+ 0f
218 REST_NVGPRS(r1) 220 REST_NVGPRS(r1)
219 b 2f 221 b 2f
220 0: cmpld r3,r11 /* r10 is -LAST_ERRNO */ 222 0: cmpld r3,r11 /* r10 is -LAST_ERRNO */
221 blt+ 1f 223 blt+ 1f
222 andi. r0,r9,_TIF_NOERROR 224 andi. r0,r9,_TIF_NOERROR
223 bne- 1f 225 bne- 1f
224 ld r5,_CCR(r1) 226 ld r5,_CCR(r1)
225 neg r3,r3 227 neg r3,r3
226 oris r5,r5,0x1000 /* Set SO bit in CR */ 228 oris r5,r5,0x1000 /* Set SO bit in CR */
227 std r5,_CCR(r1) 229 std r5,_CCR(r1)
228 1: std r3,GPR3(r1) 230 1: std r3,GPR3(r1)
229 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) 231 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
230 beq 4f 232 beq 4f
231 233
232 /* Clear per-syscall TIF flags if any are set. */ 234 /* Clear per-syscall TIF flags if any are set. */
233 235
234 li r11,_TIF_PERSYSCALL_MASK 236 li r11,_TIF_PERSYSCALL_MASK
235 addi r12,r12,TI_FLAGS 237 addi r12,r12,TI_FLAGS
236 3: ldarx r10,0,r12 238 3: ldarx r10,0,r12
237 andc r10,r10,r11 239 andc r10,r10,r11
238 stdcx. r10,0,r12 240 stdcx. r10,0,r12
239 bne- 3b 241 bne- 3b
240 subi r12,r12,TI_FLAGS 242 subi r12,r12,TI_FLAGS
241 243
242 4: /* Anything else left to do? */ 244 4: /* Anything else left to do? */
243 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) 245 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
244 beq .ret_from_except_lite 246 beq .ret_from_except_lite
245 247
246 /* Re-enable interrupts */ 248 /* Re-enable interrupts */
247 mfmsr r10 249 mfmsr r10
248 ori r10,r10,MSR_EE 250 ori r10,r10,MSR_EE
249 mtmsrd r10,1 251 mtmsrd r10,1
250 252
251 bl .save_nvgprs 253 bl .save_nvgprs
252 addi r3,r1,STACK_FRAME_OVERHEAD 254 addi r3,r1,STACK_FRAME_OVERHEAD
253 bl .do_syscall_trace_leave 255 bl .do_syscall_trace_leave
254 b .ret_from_except 256 b .ret_from_except
255 257
256 /* Save non-volatile GPRs, if not already saved. */ 258 /* Save non-volatile GPRs, if not already saved. */
257 _GLOBAL(save_nvgprs) 259 _GLOBAL(save_nvgprs)
258 ld r11,_TRAP(r1) 260 ld r11,_TRAP(r1)
259 andi. r0,r11,1 261 andi. r0,r11,1
260 beqlr- 262 beqlr-
261 SAVE_NVGPRS(r1) 263 SAVE_NVGPRS(r1)
262 clrrdi r0,r11,1 264 clrrdi r0,r11,1
263 std r0,_TRAP(r1) 265 std r0,_TRAP(r1)
264 blr 266 blr
265 267
266 268
267 /* 269 /*
268 * The sigsuspend and rt_sigsuspend system calls can call do_signal 270 * The sigsuspend and rt_sigsuspend system calls can call do_signal
269 * and thus put the process into the stopped state where we might 271 * and thus put the process into the stopped state where we might
270 * want to examine its user state with ptrace. Therefore we need 272 * want to examine its user state with ptrace. Therefore we need
271 * to save all the nonvolatile registers (r14 - r31) before calling 273 * to save all the nonvolatile registers (r14 - r31) before calling
272 * the C code. Similarly, fork, vfork and clone need the full 274 * the C code. Similarly, fork, vfork and clone need the full
273 * register state on the stack so that it can be copied to the child. 275 * register state on the stack so that it can be copied to the child.
274 */ 276 */
275 277
276 _GLOBAL(ppc_fork) 278 _GLOBAL(ppc_fork)
277 bl .save_nvgprs 279 bl .save_nvgprs
278 bl .sys_fork 280 bl .sys_fork
279 b syscall_exit 281 b syscall_exit
280 282
281 _GLOBAL(ppc_vfork) 283 _GLOBAL(ppc_vfork)
282 bl .save_nvgprs 284 bl .save_nvgprs
283 bl .sys_vfork 285 bl .sys_vfork
284 b syscall_exit 286 b syscall_exit
285 287
286 _GLOBAL(ppc_clone) 288 _GLOBAL(ppc_clone)
287 bl .save_nvgprs 289 bl .save_nvgprs
288 bl .sys_clone 290 bl .sys_clone
289 b syscall_exit 291 b syscall_exit
290 292
291 _GLOBAL(ppc32_swapcontext) 293 _GLOBAL(ppc32_swapcontext)
292 bl .save_nvgprs 294 bl .save_nvgprs
293 bl .compat_sys_swapcontext 295 bl .compat_sys_swapcontext
294 b syscall_exit 296 b syscall_exit
295 297
296 _GLOBAL(ppc64_swapcontext) 298 _GLOBAL(ppc64_swapcontext)
297 bl .save_nvgprs 299 bl .save_nvgprs
298 bl .sys_swapcontext 300 bl .sys_swapcontext
299 b syscall_exit 301 b syscall_exit
300 302
301 _GLOBAL(ret_from_fork) 303 _GLOBAL(ret_from_fork)
302 bl .schedule_tail 304 bl .schedule_tail
303 REST_NVGPRS(r1) 305 REST_NVGPRS(r1)
304 li r3,0 306 li r3,0
305 b syscall_exit 307 b syscall_exit
306 308
307 /* 309 /*
308 * This routine switches between two different tasks. The process 310 * This routine switches between two different tasks. The process
309 * state of one is saved on its kernel stack. Then the state 311 * state of one is saved on its kernel stack. Then the state
310 * of the other is restored from its kernel stack. The memory 312 * of the other is restored from its kernel stack. The memory
311 * management hardware is updated to the second process's state. 313 * management hardware is updated to the second process's state.
312 * Finally, we can return to the second process, via ret_from_except. 314 * Finally, we can return to the second process, via ret_from_except.
313 * On entry, r3 points to the THREAD for the current task, r4 315 * On entry, r3 points to the THREAD for the current task, r4
314 * points to the THREAD for the new task. 316 * points to the THREAD for the new task.
315 * 317 *
316 * Note: there are two ways to get to the "going out" portion 318 * Note: there are two ways to get to the "going out" portion
317 * of this code; either by coming in via the entry (_switch) 319 * of this code; either by coming in via the entry (_switch)
318 * or via "fork" which must set up an environment equivalent 320 * or via "fork" which must set up an environment equivalent
319 * to the "_switch" path. If you change this you'll have to change 321 * to the "_switch" path. If you change this you'll have to change
320 * the fork code also. 322 * the fork code also.
321 * 323 *
322 * The code which creates the new task context is in 'copy_thread' 324 * The code which creates the new task context is in 'copy_thread'
323 * in arch/powerpc/kernel/process.c 325 * in arch/powerpc/kernel/process.c
324 */ 326 */
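Read as C, the contract spelled out in the comment above amounts to roughly the prototype below. This is an illustrative declaration only; the kernel's actual prototype may differ in spelling and lives in its own headers.

    struct task_struct;                     /* opaque for this sketch */
    struct thread_struct;                   /* opaque for this sketch */

    /*
     * On entry r3 is &current->thread and r4 is &new->thread; the routine
     * returns the previous task's task_struct (converted back from the
     * THREAD pointer with "addi r3,r3,-THREAD" before the final blr).
     */
    struct task_struct *_switch(struct thread_struct *prev_thread,
                                struct thread_struct *new_thread);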
325 .align 7 327 .align 7
326 _GLOBAL(_switch) 328 _GLOBAL(_switch)
327 mflr r0 329 mflr r0
328 std r0,16(r1) 330 std r0,16(r1)
329 stdu r1,-SWITCH_FRAME_SIZE(r1) 331 stdu r1,-SWITCH_FRAME_SIZE(r1)
330 /* r3-r13 are caller saved -- Cort */ 332 /* r3-r13 are caller saved -- Cort */
331 SAVE_8GPRS(14, r1) 333 SAVE_8GPRS(14, r1)
332 SAVE_10GPRS(22, r1) 334 SAVE_10GPRS(22, r1)
333 mflr r20 /* Return to switch caller */ 335 mflr r20 /* Return to switch caller */
334 mfmsr r22 336 mfmsr r22
335 li r0, MSR_FP 337 li r0, MSR_FP
336 #ifdef CONFIG_ALTIVEC 338 #ifdef CONFIG_ALTIVEC
337 BEGIN_FTR_SECTION 339 BEGIN_FTR_SECTION
338 oris r0,r0,MSR_VEC@h /* Disable altivec */ 340 oris r0,r0,MSR_VEC@h /* Disable altivec */
339 mfspr r24,SPRN_VRSAVE /* save vrsave register value */ 341 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
340 std r24,THREAD_VRSAVE(r3) 342 std r24,THREAD_VRSAVE(r3)
341 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 343 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
342 #endif /* CONFIG_ALTIVEC */ 344 #endif /* CONFIG_ALTIVEC */
343 and. r0,r0,r22 345 and. r0,r0,r22
344 beq+ 1f 346 beq+ 1f
345 andc r22,r22,r0 347 andc r22,r22,r0
346 mtmsrd r22 348 mtmsrd r22
347 isync 349 isync
348 1: std r20,_NIP(r1) 350 1: std r20,_NIP(r1)
349 mfcr r23 351 mfcr r23
350 std r23,_CCR(r1) 352 std r23,_CCR(r1)
351 std r1,KSP(r3) /* Set old stack pointer */ 353 std r1,KSP(r3) /* Set old stack pointer */
352 354
353 #ifdef CONFIG_SMP 355 #ifdef CONFIG_SMP
354 /* We need a sync somewhere here to make sure that if the 356 /* We need a sync somewhere here to make sure that if the
355 * previous task gets rescheduled on another CPU, it sees all 357 * previous task gets rescheduled on another CPU, it sees all
356 * stores it has performed on this one. 358 * stores it has performed on this one.
357 */ 359 */
358 sync 360 sync
359 #endif /* CONFIG_SMP */ 361 #endif /* CONFIG_SMP */
360 362
361 addi r6,r4,-THREAD /* Convert THREAD to 'current' */ 363 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
362 std r6,PACACURRENT(r13) /* Set new 'current' */ 364 std r6,PACACURRENT(r13) /* Set new 'current' */
363 365
364 ld r8,KSP(r4) /* new stack pointer */ 366 ld r8,KSP(r4) /* new stack pointer */
365 BEGIN_FTR_SECTION 367 BEGIN_FTR_SECTION
366 clrrdi r6,r8,28 /* get its ESID */ 368 clrrdi r6,r8,28 /* get its ESID */
367 clrrdi r9,r1,28 /* get current sp ESID */ 369 clrrdi r9,r1,28 /* get current sp ESID */
368 clrldi. r0,r6,2 /* is new ESID c00000000? */ 370 clrldi. r0,r6,2 /* is new ESID c00000000? */
369 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ 371 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
370 cror eq,4*cr1+eq,eq 372 cror eq,4*cr1+eq,eq
371 beq 2f /* if yes, don't slbie it */ 373 beq 2f /* if yes, don't slbie it */
372 374
373 /* Bolt in the new stack SLB entry */ 375 /* Bolt in the new stack SLB entry */
374 ld r7,KSP_VSID(r4) /* Get new stack's VSID */ 376 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
375 oris r0,r6,(SLB_ESID_V)@h 377 oris r0,r6,(SLB_ESID_V)@h
376 ori r0,r0,(SLB_NUM_BOLTED-1)@l 378 ori r0,r0,(SLB_NUM_BOLTED-1)@l
377 379
378 /* Update the last bolted SLB */ 380 /* Update the last bolted SLB */
379 ld r9,PACA_SLBSHADOWPTR(r13) 381 ld r9,PACA_SLBSHADOWPTR(r13)
380 li r12,0 382 li r12,0
381 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ 383 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
382 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ 384 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
383 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ 385 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
384 386
385 slbie r6 387 slbie r6
386 slbie r6 /* Workaround POWER5 < DD2.1 issue */ 388 slbie r6 /* Workaround POWER5 < DD2.1 issue */
387 slbmte r7,r0 389 slbmte r7,r0
388 isync 390 isync
389 391
390 2: 392 2:
391 END_FTR_SECTION_IFSET(CPU_FTR_SLB) 393 END_FTR_SECTION_IFSET(CPU_FTR_SLB)
392 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ 394 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
393 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE 395 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
394 because we don't need to leave the 288-byte ABI gap at the 396 because we don't need to leave the 288-byte ABI gap at the
395 top of the kernel stack. */ 397 top of the kernel stack. */
396 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE 398 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
397 399
398 mr r1,r8 /* start using new stack pointer */ 400 mr r1,r8 /* start using new stack pointer */
399 std r7,PACAKSAVE(r13) 401 std r7,PACAKSAVE(r13)
400 402
401 ld r6,_CCR(r1) 403 ld r6,_CCR(r1)
402 mtcrf 0xFF,r6 404 mtcrf 0xFF,r6
403 405
404 #ifdef CONFIG_ALTIVEC 406 #ifdef CONFIG_ALTIVEC
405 BEGIN_FTR_SECTION 407 BEGIN_FTR_SECTION
406 ld r0,THREAD_VRSAVE(r4) 408 ld r0,THREAD_VRSAVE(r4)
407 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ 409 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
408 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 410 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
409 #endif /* CONFIG_ALTIVEC */ 411 #endif /* CONFIG_ALTIVEC */
410 412
411 /* r3-r13 are destroyed -- Cort */ 413 /* r3-r13 are destroyed -- Cort */
412 REST_8GPRS(14, r1) 414 REST_8GPRS(14, r1)
413 REST_10GPRS(22, r1) 415 REST_10GPRS(22, r1)
414 416
415 /* convert old thread to its task_struct for return value */ 417 /* convert old thread to its task_struct for return value */
416 addi r3,r3,-THREAD 418 addi r3,r3,-THREAD
417 ld r7,_NIP(r1) /* Return to _switch caller in new task */ 419 ld r7,_NIP(r1) /* Return to _switch caller in new task */
418 mtlr r7 420 mtlr r7
419 addi r1,r1,SWITCH_FRAME_SIZE 421 addi r1,r1,SWITCH_FRAME_SIZE
420 blr 422 blr
421 423
422 .align 7 424 .align 7
423 _GLOBAL(ret_from_except) 425 _GLOBAL(ret_from_except)
424 ld r11,_TRAP(r1) 426 ld r11,_TRAP(r1)
425 andi. r0,r11,1 427 andi. r0,r11,1
426 bne .ret_from_except_lite 428 bne .ret_from_except_lite
427 REST_NVGPRS(r1) 429 REST_NVGPRS(r1)
428 430
429 _GLOBAL(ret_from_except_lite) 431 _GLOBAL(ret_from_except_lite)
430 /* 432 /*
431 * Disable interrupts so that current_thread_info()->flags 433 * Disable interrupts so that current_thread_info()->flags
432 * can't change between when we test it and when we return 434 * can't change between when we test it and when we return
433 * from the interrupt. 435 * from the interrupt.
434 */ 436 */
435 mfmsr r10 /* Get current interrupt state */ 437 mfmsr r10 /* Get current interrupt state */
436 rldicl r9,r10,48,1 /* clear MSR_EE */ 438 rldicl r9,r10,48,1 /* clear MSR_EE */
437 rotldi r9,r9,16 439 rotldi r9,r9,16
438 mtmsrd r9,1 /* Update machine state */ 440 mtmsrd r9,1 /* Update machine state */
439 441
440 #ifdef CONFIG_PREEMPT 442 #ifdef CONFIG_PREEMPT
441 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ 443 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
442 li r0,_TIF_NEED_RESCHED /* bits to check */ 444 li r0,_TIF_NEED_RESCHED /* bits to check */
443 ld r3,_MSR(r1) 445 ld r3,_MSR(r1)
444 ld r4,TI_FLAGS(r9) 446 ld r4,TI_FLAGS(r9)
445 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */ 447 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
446 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING 448 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
447 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */ 449 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
448 bne do_work 450 bne do_work
449 451
450 #else /* !CONFIG_PREEMPT */ 452 #else /* !CONFIG_PREEMPT */
451 ld r3,_MSR(r1) /* Returning to user mode? */ 453 ld r3,_MSR(r1) /* Returning to user mode? */
452 andi. r3,r3,MSR_PR 454 andi. r3,r3,MSR_PR
453 beq restore /* if not, just restore regs and return */ 455 beq restore /* if not, just restore regs and return */
454 456
455 /* Check current_thread_info()->flags */ 457 /* Check current_thread_info()->flags */
456 clrrdi r9,r1,THREAD_SHIFT 458 clrrdi r9,r1,THREAD_SHIFT
457 ld r4,TI_FLAGS(r9) 459 ld r4,TI_FLAGS(r9)
458 andi. r0,r4,_TIF_USER_WORK_MASK 460 andi. r0,r4,_TIF_USER_WORK_MASK
459 bne do_work 461 bne do_work
460 #endif 462 #endif
461 463
462 restore: 464 restore:
465 ld r5,SOFTE(r1)
463 #ifdef CONFIG_PPC_ISERIES 466 #ifdef CONFIG_PPC_ISERIES
464 BEGIN_FW_FTR_SECTION 467 BEGIN_FW_FTR_SECTION
465 ld r5,SOFTE(r1)
466 cmpdi 0,r5,0 468 cmpdi 0,r5,0
467 beq 4f 469 beq 4f
468 /* Check for pending interrupts (iSeries) */ 470 /* Check for pending interrupts (iSeries) */
469 ld r3,PACALPPACAPTR(r13) 471 ld r3,PACALPPACAPTR(r13)
470 ld r3,LPPACAANYINT(r3) 472 ld r3,LPPACAANYINT(r3)
471 cmpdi r3,0 473 cmpdi r3,0
472 beq+ 4f /* skip do_IRQ if no interrupts */ 474 beq+ 4f /* skip do_IRQ if no interrupts */
473 475
474 li r3,0 476 li r3,0
475 stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */ 477 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
476 ori r10,r10,MSR_EE 478 ori r10,r10,MSR_EE
477 mtmsrd r10 /* hard-enable again */ 479 mtmsrd r10 /* hard-enable again */
478 addi r3,r1,STACK_FRAME_OVERHEAD 480 addi r3,r1,STACK_FRAME_OVERHEAD
479 bl .do_IRQ 481 bl .do_IRQ
480 b .ret_from_except_lite /* loop back and handle more */ 482 b .ret_from_except_lite /* loop back and handle more */
481 483 4:
482 4: stb r5,PACAPROCENABLED(r13)
483 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 484 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
484 #endif 485 #endif
486 stb r5,PACASOFTIRQEN(r13)
485 487
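The restore path above undoes the entry-time bookkeeping: the soft-enable state saved in the frame is read back (ld r5,SOFTE(r1)) and written to the paca (stb r5,PACASOFTIRQEN(r13)) before the registers are reloaded and rfid returns. A user-space sketch of that single step, with illustrative names:

    struct paca_model  { unsigned char soft_enabled; };    /* PACASOFTIRQEN */
    struct frame_model { long softe; };                     /* SOFTE(r1)     */

    /* ld r5,SOFTE(r1) ; stb r5,PACASOFTIRQEN(r13) */
    static void restore_soft_state(struct paca_model *paca,
                                   const struct frame_model *frame)
    {
            paca->soft_enabled = (unsigned char)frame->softe;
    }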
486 ld r3,_MSR(r1) 488 ld r3,_MSR(r1)
487 andi. r0,r3,MSR_RI 489 andi. r0,r3,MSR_RI
488 beq- unrecov_restore 490 beq- unrecov_restore
489 491
490 andi. r0,r3,MSR_PR 492 andi. r0,r3,MSR_PR
491 493
492 /* 494 /*
493 * r13 is our per cpu area, only restore it if we are returning to 495 * r13 is our per cpu area, only restore it if we are returning to
494 * userspace 496 * userspace
495 */ 497 */
496 beq 1f 498 beq 1f
497 ACCOUNT_CPU_USER_EXIT(r3, r4) 499 ACCOUNT_CPU_USER_EXIT(r3, r4)
498 REST_GPR(13, r1) 500 REST_GPR(13, r1)
499 1: 501 1:
500 ld r3,_CTR(r1) 502 ld r3,_CTR(r1)
501 ld r0,_LINK(r1) 503 ld r0,_LINK(r1)
502 mtctr r3 504 mtctr r3
503 mtlr r0 505 mtlr r0
504 ld r3,_XER(r1) 506 ld r3,_XER(r1)
505 mtspr SPRN_XER,r3 507 mtspr SPRN_XER,r3
506 508
507 REST_8GPRS(5, r1) 509 REST_8GPRS(5, r1)
508 510
509 stdcx. r0,0,r1 /* to clear the reservation */ 511 stdcx. r0,0,r1 /* to clear the reservation */
510 512
511 mfmsr r0 513 mfmsr r0
512 li r2, MSR_RI 514 li r2, MSR_RI
513 andc r0,r0,r2 515 andc r0,r0,r2
514 mtmsrd r0,1 516 mtmsrd r0,1
515 517
516 ld r0,_MSR(r1) 518 ld r0,_MSR(r1)
517 mtspr SPRN_SRR1,r0 519 mtspr SPRN_SRR1,r0
518 520
519 ld r2,_CCR(r1) 521 ld r2,_CCR(r1)
520 mtcrf 0xFF,r2 522 mtcrf 0xFF,r2
521 ld r2,_NIP(r1) 523 ld r2,_NIP(r1)
522 mtspr SPRN_SRR0,r2 524 mtspr SPRN_SRR0,r2
523 525
524 ld r0,GPR0(r1) 526 ld r0,GPR0(r1)
525 ld r2,GPR2(r1) 527 ld r2,GPR2(r1)
526 ld r3,GPR3(r1) 528 ld r3,GPR3(r1)
527 ld r4,GPR4(r1) 529 ld r4,GPR4(r1)
528 ld r1,GPR1(r1) 530 ld r1,GPR1(r1)
529 531
530 rfid 532 rfid
531 b . /* prevent speculative execution */ 533 b . /* prevent speculative execution */
532 534
533 /* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */ 535 /* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
534 do_work: 536 do_work:
535 #ifdef CONFIG_PREEMPT 537 #ifdef CONFIG_PREEMPT
536 andi. r0,r3,MSR_PR /* Returning to user mode? */ 538 andi. r0,r3,MSR_PR /* Returning to user mode? */
537 bne user_work 539 bne user_work
538 /* Check that preempt_count() == 0 and interrupts are enabled */ 540 /* Check that preempt_count() == 0 and interrupts are enabled */
539 lwz r8,TI_PREEMPT(r9) 541 lwz r8,TI_PREEMPT(r9)
540 cmpwi cr1,r8,0 542 cmpwi cr1,r8,0
541 #ifdef CONFIG_PPC_ISERIES
542 BEGIN_FW_FTR_SECTION
543 ld r0,SOFTE(r1) 543 ld r0,SOFTE(r1)
544 cmpdi r0,0 544 cmpdi r0,0
545 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
546 #endif
547 BEGIN_FW_FTR_SECTION
548 andi. r0,r3,MSR_EE
549 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
550 crandc eq,cr1*4+eq,eq 545 crandc eq,cr1*4+eq,eq
551 bne restore 546 bne restore
552 /* here we are preempting the current task */ 547 /* here we are preempting the current task */
553 1: 548 1:
554 #ifdef CONFIG_PPC_ISERIES
555 BEGIN_FW_FTR_SECTION
556 li r0,1 549 li r0,1
557 stb r0,PACAPROCENABLED(r13) 550 stb r0,PACASOFTIRQEN(r13)
558 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 551 stb r0,PACAHARDIRQEN(r13)
559 #endif
560 ori r10,r10,MSR_EE 552 ori r10,r10,MSR_EE
561 mtmsrd r10,1 /* reenable interrupts */ 553 mtmsrd r10,1 /* reenable interrupts */
562 bl .preempt_schedule 554 bl .preempt_schedule
563 mfmsr r10 555 mfmsr r10
564 clrrdi r9,r1,THREAD_SHIFT 556 clrrdi r9,r1,THREAD_SHIFT
565 rldicl r10,r10,48,1 /* disable interrupts again */ 557 rldicl r10,r10,48,1 /* disable interrupts again */
566 rotldi r10,r10,16 558 rotldi r10,r10,16
567 mtmsrd r10,1 559 mtmsrd r10,1
568 ld r4,TI_FLAGS(r9) 560 ld r4,TI_FLAGS(r9)
569 andi. r0,r4,_TIF_NEED_RESCHED 561 andi. r0,r4,_TIF_NEED_RESCHED
570 bne 1b 562 bne 1b
571 b restore 563 b restore
572 564
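In the CONFIG_PREEMPT branch of do_work above, preemption is attempted only when preempt_count() is zero and the interrupted context had interrupts soft-enabled (SOFTE(r1) nonzero); the two comparisons are combined with crandc and anything else falls back to restore. Before calling .preempt_schedule the code marks both paca flags enabled and then sets MSR_EE. A hedged C rendering of that logic; the names model the paca and thread_info fields and are not kernel declarations:

    struct paca_model { unsigned char soft_enabled, hard_enabled; };

    /* cmpwi cr1,r8,0 ; cmpdi r0,0 ; crandc eq,cr1*4+eq,eq ; bne restore */
    static int should_preempt(unsigned int preempt_count, long softe)
    {
            return preempt_count == 0 && softe != 0;
    }

    /* li r0,1 ; stb r0,PACASOFTIRQEN(r13) ; stb r0,PACAHARDIRQEN(r13),
     * followed by ori r10,r10,MSR_EE ; mtmsrd r10,1 (the MSR write itself
     * has no user-space equivalent here). */
    static void mark_irqs_enabled_before_preempt(struct paca_model *paca)
    {
            paca->soft_enabled = 1;
            paca->hard_enabled = 1;
    }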
573 user_work: 565 user_work:
574 #endif 566 #endif
575 /* Enable interrupts */ 567 /* Enable interrupts */
576 ori r10,r10,MSR_EE 568 ori r10,r10,MSR_EE
577 mtmsrd r10,1 569 mtmsrd r10,1
578 570
579 andi. r0,r4,_TIF_NEED_RESCHED 571 andi. r0,r4,_TIF_NEED_RESCHED
580 beq 1f 572 beq 1f
581 bl .schedule 573 bl .schedule
582 b .ret_from_except_lite 574 b .ret_from_except_lite
583 575
584 1: bl .save_nvgprs 576 1: bl .save_nvgprs
585 li r3,0 577 li r3,0
586 addi r4,r1,STACK_FRAME_OVERHEAD 578 addi r4,r1,STACK_FRAME_OVERHEAD
587 bl .do_signal 579 bl .do_signal
588 b .ret_from_except 580 b .ret_from_except
589 581
590 unrecov_restore: 582 unrecov_restore:
591 addi r3,r1,STACK_FRAME_OVERHEAD 583 addi r3,r1,STACK_FRAME_OVERHEAD
592 bl .unrecoverable_exception 584 bl .unrecoverable_exception
593 b unrecov_restore 585 b unrecov_restore
594 586
595 #ifdef CONFIG_PPC_RTAS 587 #ifdef CONFIG_PPC_RTAS
596 /* 588 /*
597 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be 589 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
598 * called with the MMU off. 590 * called with the MMU off.
599 * 591 *
600 * In addition, we need to be in 32b mode, at least for now. 592 * In addition, we need to be in 32b mode, at least for now.
601 * 593 *
602 * Note: r3 is an input parameter to rtas, so don't trash it... 594 * Note: r3 is an input parameter to rtas, so don't trash it...
603 */ 595 */
604 _GLOBAL(enter_rtas) 596 _GLOBAL(enter_rtas)
605 mflr r0 597 mflr r0
606 std r0,16(r1) 598 std r0,16(r1)
607 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */ 599 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
608 600
609 /* Because RTAS is running in 32b mode, it clobbers the high order half 601 /* Because RTAS is running in 32b mode, it clobbers the high order half
610 * of all registers that it saves. We therefore save those registers 602 * of all registers that it saves. We therefore save those registers
611 * RTAS might touch to the stack. (r0, r3-r13 are caller saved) 603 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
612 */ 604 */
613 SAVE_GPR(2, r1) /* Save the TOC */ 605 SAVE_GPR(2, r1) /* Save the TOC */
614 SAVE_GPR(13, r1) /* Save paca */ 606 SAVE_GPR(13, r1) /* Save paca */
615 SAVE_8GPRS(14, r1) /* Save the non-volatiles */ 607 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
616 SAVE_10GPRS(22, r1) /* ditto */ 608 SAVE_10GPRS(22, r1) /* ditto */
617 609
618 mfcr r4 610 mfcr r4
619 std r4,_CCR(r1) 611 std r4,_CCR(r1)
620 mfctr r5 612 mfctr r5
621 std r5,_CTR(r1) 613 std r5,_CTR(r1)
622 mfspr r6,SPRN_XER 614 mfspr r6,SPRN_XER
623 std r6,_XER(r1) 615 std r6,_XER(r1)
624 mfdar r7 616 mfdar r7
625 std r7,_DAR(r1) 617 std r7,_DAR(r1)
626 mfdsisr r8 618 mfdsisr r8
627 std r8,_DSISR(r1) 619 std r8,_DSISR(r1)
628 mfsrr0 r9 620 mfsrr0 r9
629 std r9,_SRR0(r1) 621 std r9,_SRR0(r1)
630 mfsrr1 r10 622 mfsrr1 r10
631 std r10,_SRR1(r1) 623 std r10,_SRR1(r1)
632 624
633 /* Temporary workaround to clear CR until RTAS can be modified to 625 /* Temporary workaround to clear CR until RTAS can be modified to
634 * ignore all bits. 626 * ignore all bits.
635 */ 627 */
636 li r0,0 628 li r0,0
637 mtcr r0 629 mtcr r0
638 630
639 /* There is no way it is acceptable to get here with interrupts enabled, 631 /* There is no way it is acceptable to get here with interrupts enabled,
640 * check it with the asm equivalent of WARN_ON 632 * check it with the asm equivalent of WARN_ON
641 */ 633 */
642 mfmsr r6 634 lbz r0,PACASOFTIRQEN(r13)
643 andi. r0,r6,MSR_EE
644 1: tdnei r0,0 635 1: tdnei r0,0
645 .section __bug_table,"a" 636 .section __bug_table,"a"
646 .llong 1b,__LINE__ + 0x1000000, 1f, 2f 637 .llong 1b,__LINE__ + 0x1000000, 1f, 2f
647 .previous 638 .previous
648 .section .rodata,"a" 639 .section .rodata,"a"
649 1: .asciz __FILE__ 640 1: .asciz __FILE__
650 2: .asciz "enter_rtas" 641 2: .asciz "enter_rtas"
651 .previous 642 .previous
652 643
644 /* Hard-disable interrupts */
645 mfmsr r6
646 rldicl r7,r6,48,1
647 rotldi r7,r7,16
648 mtmsrd r7,1
649
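enter_rtas now asserts the lazy-disable invariant instead of inspecting MSR_EE: the asm-level WARN_ON above fires if PACASOFTIRQEN is still set (lbz r0,PACASOFTIRQEN(r13); tdnei r0,0), and the new four-instruction sequence then clears MSR_EE for real before the MSR is saved for the return path. In C terms the check is roughly the following; the function and struct names are illustrative only:

    #include <stdio.h>

    struct paca_model { unsigned char soft_enabled; };      /* PACASOFTIRQEN */

    /* What the lbz/tdnei pair above checks. */
    static void assert_soft_disabled(const struct paca_model *paca)
    {
            if (paca->soft_enabled)          /* tdnei traps: asm WARN_ON fires */
                    fprintf(stderr, "enter_rtas: interrupts soft-enabled\n");
    }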
653 /* Unfortunately, the stack pointer and the MSR are also clobbered, 650 /* Unfortunately, the stack pointer and the MSR are also clobbered,
654 * so they are saved in the PACA which allows us to restore 651 * so they are saved in the PACA which allows us to restore
655 * our original state after RTAS returns. 652 * our original state after RTAS returns.
656 */ 653 */
657 std r1,PACAR1(r13) 654 std r1,PACAR1(r13)
658 std r6,PACASAVEDMSR(r13) 655 std r6,PACASAVEDMSR(r13)
659 656
660 /* Setup our real return addr */ 657 /* Setup our real return addr */
661 LOAD_REG_ADDR(r4,.rtas_return_loc) 658 LOAD_REG_ADDR(r4,.rtas_return_loc)
662 clrldi r4,r4,2 /* convert to realmode address */ 659 clrldi r4,r4,2 /* convert to realmode address */
663 mtlr r4 660 mtlr r4
664 661
665 li r0,0 662 li r0,0
666 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI 663 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
667 andc r0,r6,r0 664 andc r0,r6,r0
668 665
669 li r9,1 666 li r9,1
670 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) 667 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
671 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP 668 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
672 andc r6,r0,r9 669 andc r6,r0,r9
673 ori r6,r6,MSR_RI 670 ori r6,r6,MSR_RI
674 sync /* disable interrupts so SRR0/1 */ 671 sync /* disable interrupts so SRR0/1 */
675 mtmsrd r0 /* don't get trashed */ 672 mtmsrd r0 /* don't get trashed */
676 673
677 LOAD_REG_ADDR(r4, rtas) 674 LOAD_REG_ADDR(r4, rtas)
678 ld r5,RTASENTRY(r4) /* get the rtas->entry value */ 675 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
679 ld r4,RTASBASE(r4) /* get the rtas->base value */ 676 ld r4,RTASBASE(r4) /* get the rtas->base value */
680 677
681 mtspr SPRN_SRR0,r5 678 mtspr SPRN_SRR0,r5
682 mtspr SPRN_SRR1,r6 679 mtspr SPRN_SRR1,r6
683 rfid 680 rfid
684 b . /* prevent speculative execution */ 681 b . /* prevent speculative execution */
685 682
686 _STATIC(rtas_return_loc) 683 _STATIC(rtas_return_loc)
687 /* relocation is off at this point */ 684 /* relocation is off at this point */
688 mfspr r4,SPRN_SPRG3 /* Get PACA */ 685 mfspr r4,SPRN_SPRG3 /* Get PACA */
689 clrldi r4,r4,2 /* convert to realmode address */ 686 clrldi r4,r4,2 /* convert to realmode address */
690 687
691 mfmsr r6 688 mfmsr r6
692 li r0,MSR_RI 689 li r0,MSR_RI
693 andc r6,r6,r0 690 andc r6,r6,r0
694 sync 691 sync
695 mtmsrd r6 692 mtmsrd r6
696 693
697 ld r1,PACAR1(r4) /* Restore our SP */ 694 ld r1,PACAR1(r4) /* Restore our SP */
698 LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs) 695 LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
699 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ 696 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
700 697
701 mtspr SPRN_SRR0,r3 698 mtspr SPRN_SRR0,r3
702 mtspr SPRN_SRR1,r4 699 mtspr SPRN_SRR1,r4
703 rfid 700 rfid
704 b . /* prevent speculative execution */ 701 b . /* prevent speculative execution */
705 702
706 _STATIC(rtas_restore_regs) 703 _STATIC(rtas_restore_regs)
707 /* relocation is on at this point */ 704 /* relocation is on at this point */
708 REST_GPR(2, r1) /* Restore the TOC */ 705 REST_GPR(2, r1) /* Restore the TOC */
709 REST_GPR(13, r1) /* Restore paca */ 706 REST_GPR(13, r1) /* Restore paca */
710 REST_8GPRS(14, r1) /* Restore the non-volatiles */ 707 REST_8GPRS(14, r1) /* Restore the non-volatiles */
711 REST_10GPRS(22, r1) /* ditto */ 708 REST_10GPRS(22, r1) /* ditto */
712 709
713 mfspr r13,SPRN_SPRG3 710 mfspr r13,SPRN_SPRG3
714 711
715 ld r4,_CCR(r1) 712 ld r4,_CCR(r1)
716 mtcr r4 713 mtcr r4
717 ld r5,_CTR(r1) 714 ld r5,_CTR(r1)
718 mtctr r5 715 mtctr r5
719 ld r6,_XER(r1) 716 ld r6,_XER(r1)
720 mtspr SPRN_XER,r6 717 mtspr SPRN_XER,r6
721 ld r7,_DAR(r1) 718 ld r7,_DAR(r1)
722 mtdar r7 719 mtdar r7
723 ld r8,_DSISR(r1) 720 ld r8,_DSISR(r1)
724 mtdsisr r8 721 mtdsisr r8
725 ld r9,_SRR0(r1) 722 ld r9,_SRR0(r1)
726 mtsrr0 r9 723 mtsrr0 r9
727 ld r10,_SRR1(r1) 724 ld r10,_SRR1(r1)
728 mtsrr1 r10 725 mtsrr1 r10
729 726
730 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */ 727 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
731 ld r0,16(r1) /* get return address */ 728 ld r0,16(r1) /* get return address */
732 729
733 mtlr r0 730 mtlr r0
734 blr /* return to caller */ 731 blr /* return to caller */
735 732
736 #endif /* CONFIG_PPC_RTAS */ 733 #endif /* CONFIG_PPC_RTAS */
737 734
738 #ifdef CONFIG_PPC_MULTIPLATFORM 735 #ifdef CONFIG_PPC_MULTIPLATFORM
739 736
740 _GLOBAL(enter_prom) 737 _GLOBAL(enter_prom)
741 mflr r0 738 mflr r0
742 std r0,16(r1) 739 std r0,16(r1)
743 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ 740 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
744 741
745 /* Because PROM is running in 32b mode, it clobbers the high order half 742 /* Because PROM is running in 32b mode, it clobbers the high order half
746 * of all registers that it saves. We therefore save those registers 743 * of all registers that it saves. We therefore save those registers
747 * PROM might touch to the stack. (r0, r3-r13 are caller saved) 744 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
748 */ 745 */
749 SAVE_8GPRS(2, r1) 746 SAVE_8GPRS(2, r1)
750 SAVE_GPR(13, r1) 747 SAVE_GPR(13, r1)
751 SAVE_8GPRS(14, r1) 748 SAVE_8GPRS(14, r1)
752 SAVE_10GPRS(22, r1) 749 SAVE_10GPRS(22, r1)
753 mfcr r4 750 mfcr r4
754 std r4,_CCR(r1) 751 std r4,_CCR(r1)
755 mfctr r5 752 mfctr r5
756 std r5,_CTR(r1) 753 std r5,_CTR(r1)
757 mfspr r6,SPRN_XER 754 mfspr r6,SPRN_XER
758 std r6,_XER(r1) 755 std r6,_XER(r1)
759 mfdar r7 756 mfdar r7
760 std r7,_DAR(r1) 757 std r7,_DAR(r1)
761 mfdsisr r8 758 mfdsisr r8
762 std r8,_DSISR(r1) 759 std r8,_DSISR(r1)
763 mfsrr0 r9 760 mfsrr0 r9
764 std r9,_SRR0(r1) 761 std r9,_SRR0(r1)
765 mfsrr1 r10 762 mfsrr1 r10
766 std r10,_SRR1(r1) 763 std r10,_SRR1(r1)
767 mfmsr r11 764 mfmsr r11
768 std r11,_MSR(r1) 765 std r11,_MSR(r1)
769 766
770 /* Get the PROM entrypoint */ 767 /* Get the PROM entrypoint */
771 ld r0,GPR4(r1) 768 ld r0,GPR4(r1)
772 mtlr r0 769 mtlr r0
773 770
774 /* Switch MSR to 32 bits mode 771 /* Switch MSR to 32 bits mode
775 */ 772 */
776 mfmsr r11 773 mfmsr r11
777 li r12,1 774 li r12,1
778 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) 775 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
779 andc r11,r11,r12 776 andc r11,r11,r12
780 li r12,1 777 li r12,1
781 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) 778 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
782 andc r11,r11,r12 779 andc r11,r11,r12
783 mtmsrd r11 780 mtmsrd r11
784 isync 781 isync
785 782
786 /* Restore arguments & enter PROM here... */ 783 /* Restore arguments & enter PROM here... */
787 ld r3,GPR3(r1) 784 ld r3,GPR3(r1)
788 blrl 785 blrl
789 786
790 /* Just make sure that r1 top 32 bits didn't get 787 /* Just make sure that r1 top 32 bits didn't get
791 * corrupt by OF 788 * corrupt by OF
792 */ 789 */
793 rldicl r1,r1,0,32 790 rldicl r1,r1,0,32
794 791
795 /* Restore the MSR (back to 64 bits) */ 792 /* Restore the MSR (back to 64 bits) */
796 ld r0,_MSR(r1) 793 ld r0,_MSR(r1)
797 mtmsrd r0 794 mtmsrd r0
798 isync 795 isync
799 796
800 /* Restore other registers */ 797 /* Restore other registers */
801 REST_GPR(2, r1) 798 REST_GPR(2, r1)
802 REST_GPR(13, r1) 799 REST_GPR(13, r1)
803 REST_8GPRS(14, r1) 800 REST_8GPRS(14, r1)
804 REST_10GPRS(22, r1) 801 REST_10GPRS(22, r1)
805 ld r4,_CCR(r1) 802 ld r4,_CCR(r1)
806 mtcr r4 803 mtcr r4
807 ld r5,_CTR(r1) 804 ld r5,_CTR(r1)
808 mtctr r5 805 mtctr r5
809 ld r6,_XER(r1) 806 ld r6,_XER(r1)
810 mtspr SPRN_XER,r6 807 mtspr SPRN_XER,r6
811 ld r7,_DAR(r1) 808 ld r7,_DAR(r1)
812 mtdar r7 809 mtdar r7
813 ld r8,_DSISR(r1) 810 ld r8,_DSISR(r1)
814 mtdsisr r8 811 mtdsisr r8
arch/powerpc/kernel/head_64.S
1 /* 1 /*
2 * PowerPC version 2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * 4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP 5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> 6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras. 7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support 8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras. 9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras. 10 * Copyright (C) 1996 Paul Mackerras.
11 * 11 *
12 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and 12 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
13 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com 13 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
14 * 14 *
15 * This file contains the low-level support and setup for the 15 * This file contains the low-level support and setup for the
16 * PowerPC-64 platform, including trap and interrupt dispatch. 16 * PowerPC-64 platform, including trap and interrupt dispatch.
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License 19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version 20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version. 21 * 2 of the License, or (at your option) any later version.
22 */ 22 */
23 23
24 #include <linux/threads.h> 24 #include <linux/threads.h>
25 #include <asm/reg.h> 25 #include <asm/reg.h>
26 #include <asm/page.h> 26 #include <asm/page.h>
27 #include <asm/mmu.h> 27 #include <asm/mmu.h>
28 #include <asm/ppc_asm.h> 28 #include <asm/ppc_asm.h>
29 #include <asm/asm-offsets.h> 29 #include <asm/asm-offsets.h>
30 #include <asm/bug.h> 30 #include <asm/bug.h>
31 #include <asm/cputable.h> 31 #include <asm/cputable.h>
32 #include <asm/setup.h> 32 #include <asm/setup.h>
33 #include <asm/hvcall.h> 33 #include <asm/hvcall.h>
34 #include <asm/iseries/lpar_map.h> 34 #include <asm/iseries/lpar_map.h>
35 #include <asm/thread_info.h> 35 #include <asm/thread_info.h>
36 #include <asm/firmware.h> 36 #include <asm/firmware.h>
37 37
38 #ifdef CONFIG_PPC_ISERIES
39 #define DO_SOFT_DISABLE 38 #define DO_SOFT_DISABLE
40 #endif
41 39
42 /* 40 /*
43 * We layout physical memory as follows: 41 * We layout physical memory as follows:
44 * 0x0000 - 0x00ff : Secondary processor spin code 42 * 0x0000 - 0x00ff : Secondary processor spin code
45 * 0x0100 - 0x2fff : pSeries Interrupt prologs 43 * 0x0100 - 0x2fff : pSeries Interrupt prologs
46 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs 44 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
47 * 0x6000 - 0x6fff : Initial (CPU0) segment table 45 * 0x6000 - 0x6fff : Initial (CPU0) segment table
48 * 0x7000 - 0x7fff : FWNMI data area 46 * 0x7000 - 0x7fff : FWNMI data area
49 * 0x8000 - : Early init and support code 47 * 0x8000 - : Early init and support code
50 */ 48 */
51 49
52 /* 50 /*
53 * SPRG Usage 51 * SPRG Usage
54 * 52 *
55 * Register Definition 53 * Register Definition
56 * 54 *
57 * SPRG0 reserved for hypervisor 55 * SPRG0 reserved for hypervisor
58 * SPRG1 temp - used to save gpr 56 * SPRG1 temp - used to save gpr
59 * SPRG2 temp - used to save gpr 57 * SPRG2 temp - used to save gpr
60 * SPRG3 virt addr of paca 58 * SPRG3 virt addr of paca
61 */ 59 */
62 60
63 /* 61 /*
64 * Entering into this code we make the following assumptions: 62 * Entering into this code we make the following assumptions:
65 * For pSeries: 63 * For pSeries:
66 * 1. The MMU is off & open firmware is running in real mode. 64 * 1. The MMU is off & open firmware is running in real mode.
67 * 2. The kernel is entered at __start 65 * 2. The kernel is entered at __start
68 * 66 *
69 * For iSeries: 67 * For iSeries:
70 * 1. The MMU is on (as it always is for iSeries) 68 * 1. The MMU is on (as it always is for iSeries)
71 * 2. The kernel is entered at system_reset_iSeries 69 * 2. The kernel is entered at system_reset_iSeries
72 */ 70 */
73 71
74 .text 72 .text
75 .globl _stext 73 .globl _stext
76 _stext: 74 _stext:
77 #ifdef CONFIG_PPC_MULTIPLATFORM 75 #ifdef CONFIG_PPC_MULTIPLATFORM
78 _GLOBAL(__start) 76 _GLOBAL(__start)
79 /* NOP this out unconditionally */ 77 /* NOP this out unconditionally */
80 BEGIN_FTR_SECTION 78 BEGIN_FTR_SECTION
81 b .__start_initialization_multiplatform 79 b .__start_initialization_multiplatform
82 END_FTR_SECTION(0, 1) 80 END_FTR_SECTION(0, 1)
83 #endif /* CONFIG_PPC_MULTIPLATFORM */ 81 #endif /* CONFIG_PPC_MULTIPLATFORM */
84 82
85 /* Catch branch to 0 in real mode */ 83 /* Catch branch to 0 in real mode */
86 trap 84 trap
87 85
88 /* Secondary processors spin on this value until it goes to 1. */ 86 /* Secondary processors spin on this value until it goes to 1. */
89 .globl __secondary_hold_spinloop 87 .globl __secondary_hold_spinloop
90 __secondary_hold_spinloop: 88 __secondary_hold_spinloop:
91 .llong 0x0 89 .llong 0x0
92 90
93 /* Secondary processors write this value with their cpu # */ 91 /* Secondary processors write this value with their cpu # */
94 /* after they enter the spin loop immediately below. */ 92 /* after they enter the spin loop immediately below. */
95 .globl __secondary_hold_acknowledge 93 .globl __secondary_hold_acknowledge
96 __secondary_hold_acknowledge: 94 __secondary_hold_acknowledge:
97 .llong 0x0 95 .llong 0x0
98 96
99 #ifdef CONFIG_PPC_ISERIES 97 #ifdef CONFIG_PPC_ISERIES
100 /* 98 /*
101 * At offset 0x20, there is a pointer to iSeries LPAR data. 99 * At offset 0x20, there is a pointer to iSeries LPAR data.
102 * This is required by the hypervisor 100 * This is required by the hypervisor
103 */ 101 */
104 . = 0x20 102 . = 0x20
105 .llong hvReleaseData-KERNELBASE 103 .llong hvReleaseData-KERNELBASE
106 #endif /* CONFIG_PPC_ISERIES */ 104 #endif /* CONFIG_PPC_ISERIES */
107 105
108 . = 0x60 106 . = 0x60
109 /* 107 /*
110 * The following code is used on pSeries to hold secondary processors 108 * The following code is used on pSeries to hold secondary processors
111 * in a spin loop after they have been freed from OpenFirmware, but 109 * in a spin loop after they have been freed from OpenFirmware, but
112 * before the bulk of the kernel has been relocated. This code 110 * before the bulk of the kernel has been relocated. This code
113 * is relocated to physical address 0x60 before prom_init is run. 111 * is relocated to physical address 0x60 before prom_init is run.
114 * All of it must fit below the first exception vector at 0x100. 112 * All of it must fit below the first exception vector at 0x100.
115 */ 113 */
116 _GLOBAL(__secondary_hold) 114 _GLOBAL(__secondary_hold)
117 mfmsr r24 115 mfmsr r24
118 ori r24,r24,MSR_RI 116 ori r24,r24,MSR_RI
119 mtmsrd r24 /* RI on */ 117 mtmsrd r24 /* RI on */
120 118
121 /* Grab our physical cpu number */ 119 /* Grab our physical cpu number */
122 mr r24,r3 120 mr r24,r3
123 121
124 /* Tell the master cpu we're here */ 122 /* Tell the master cpu we're here */
125 /* Relocation is off & we are located at an address less */ 123 /* Relocation is off & we are located at an address less */
126 /* than 0x100, so only need to grab low order offset. */ 124 /* than 0x100, so only need to grab low order offset. */
127 std r24,__secondary_hold_acknowledge@l(0) 125 std r24,__secondary_hold_acknowledge@l(0)
128 sync 126 sync
129 127
130 /* All secondary cpus wait here until told to start. */ 128 /* All secondary cpus wait here until told to start. */
131 100: ld r4,__secondary_hold_spinloop@l(0) 129 100: ld r4,__secondary_hold_spinloop@l(0)
132 cmpdi 0,r4,1 130 cmpdi 0,r4,1
133 bne 100b 131 bne 100b
134 132
135 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 133 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
136 LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init) 134 LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
137 mtctr r4 135 mtctr r4
138 mr r3,r24 136 mr r3,r24
139 bctr 137 bctr
140 #else 138 #else
141 BUG_OPCODE 139 BUG_OPCODE
142 #endif 140 #endif
143 141
144 /* This value is used to mark exception frames on the stack. */ 142 /* This value is used to mark exception frames on the stack. */
145 .section ".toc","aw" 143 .section ".toc","aw"
146 exception_marker: 144 exception_marker:
147 .tc ID_72656773_68657265[TC],0x7265677368657265 145 .tc ID_72656773_68657265[TC],0x7265677368657265
148 .text 146 .text
149 147
150 /* 148 /*
151 * The following macros define the code that appears as 149 * The following macros define the code that appears as
152 * the prologue to each of the exception handlers. They 150 * the prologue to each of the exception handlers. They
153 * are split into two parts to allow a single kernel binary 151 * are split into two parts to allow a single kernel binary
154 * to be used for pSeries and iSeries. 152 * to be used for pSeries and iSeries.
155 * LOL. One day... - paulus 153 * LOL. One day... - paulus
156 */ 154 */
157 155
158 /* 156 /*
159 * We make as much of the exception code common between native 157 * We make as much of the exception code common between native
160 * exception handlers (including pSeries LPAR) and iSeries LPAR 158 * exception handlers (including pSeries LPAR) and iSeries LPAR
161 * implementations as possible. 159 * implementations as possible.
162 */ 160 */
163 161
164 /* 162 /*
165 * This is the start of the interrupt handlers for pSeries 163 * This is the start of the interrupt handlers for pSeries
166 * This code runs with relocation off. 164 * This code runs with relocation off.
167 */ 165 */
168 #define EX_R9 0 166 #define EX_R9 0
169 #define EX_R10 8 167 #define EX_R10 8
170 #define EX_R11 16 168 #define EX_R11 16
171 #define EX_R12 24 169 #define EX_R12 24
172 #define EX_R13 32 170 #define EX_R13 32
173 #define EX_SRR0 40 171 #define EX_SRR0 40
174 #define EX_DAR 48 172 #define EX_DAR 48
175 #define EX_DSISR 56 173 #define EX_DSISR 56
176 #define EX_CCR 60 174 #define EX_CCR 60
177 #define EX_R3 64 175 #define EX_R3 64
178 #define EX_LR 72 176 #define EX_LR 72
179 177
180 /* 178 /*
181 * We're short on space and time in the exception prolog, so we can't 179 * We're short on space and time in the exception prolog, so we can't
182 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the 180 * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
183 * low halfword of the address, but for Kdump we need the whole low 181 * low halfword of the address, but for Kdump we need the whole low
184 * word. 182 * word.
185 */ 183 */
186 #ifdef CONFIG_CRASH_DUMP 184 #ifdef CONFIG_CRASH_DUMP
187 #define LOAD_HANDLER(reg, label) \ 185 #define LOAD_HANDLER(reg, label) \
188 oris reg,reg,(label)@h; /* virt addr of handler ... */ \ 186 oris reg,reg,(label)@h; /* virt addr of handler ... */ \
189 ori reg,reg,(label)@l; /* .. and the rest */ 187 ori reg,reg,(label)@l; /* .. and the rest */
190 #else 188 #else
191 #define LOAD_HANDLER(reg, label) \ 189 #define LOAD_HANDLER(reg, label) \
192 ori reg,reg,(label)@l; /* virt addr of handler ... */ 190 ori reg,reg,(label)@l; /* virt addr of handler ... */
193 #endif 191 #endif
194 192
195 /* 193 /*
196 * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode. 194 * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
197 * The firmware calls the registered system_reset_fwnmi and 195 * The firmware calls the registered system_reset_fwnmi and
198 * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run 196 * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
199 * a 32bit application at the time of the event. 197 * a 32bit application at the time of the event.
200 * This firmware bug is present on POWER4 and JS20. 198 * This firmware bug is present on POWER4 and JS20.
201 */ 199 */
202 #define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label) \ 200 #define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label) \
203 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ 201 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
204 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 202 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
205 std r10,area+EX_R10(r13); \ 203 std r10,area+EX_R10(r13); \
206 std r11,area+EX_R11(r13); \ 204 std r11,area+EX_R11(r13); \
207 std r12,area+EX_R12(r13); \ 205 std r12,area+EX_R12(r13); \
208 mfspr r9,SPRN_SPRG1; \ 206 mfspr r9,SPRN_SPRG1; \
209 std r9,area+EX_R13(r13); \ 207 std r9,area+EX_R13(r13); \
210 mfcr r9; \ 208 mfcr r9; \
211 clrrdi r12,r13,32; /* get high part of &label */ \ 209 clrrdi r12,r13,32; /* get high part of &label */ \
212 mfmsr r10; \ 210 mfmsr r10; \
213 /* force 64bit mode */ \ 211 /* force 64bit mode */ \
214 li r11,5; /* MSR_SF_LG|MSR_ISF_LG */ \ 212 li r11,5; /* MSR_SF_LG|MSR_ISF_LG */ \
215 rldimi r10,r11,61,0; /* insert into top 3 bits */ \ 213 rldimi r10,r11,61,0; /* insert into top 3 bits */ \
216 /* done 64bit mode */ \ 214 /* done 64bit mode */ \
217 mfspr r11,SPRN_SRR0; /* save SRR0 */ \ 215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
218 LOAD_HANDLER(r12,label) \ 216 LOAD_HANDLER(r12,label) \
219 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ 217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
220 mtspr SPRN_SRR0,r12; \ 218 mtspr SPRN_SRR0,r12; \
221 mfspr r12,SPRN_SRR1; /* and SRR1 */ \ 219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
222 mtspr SPRN_SRR1,r10; \ 220 mtspr SPRN_SRR1,r10; \
223 rfid; \ 221 rfid; \
224 b . /* prevent speculative execution */ 222 b . /* prevent speculative execution */
225 223
226 #define EXCEPTION_PROLOG_PSERIES(area, label) \ 224 #define EXCEPTION_PROLOG_PSERIES(area, label) \
227 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ 225 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
228 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 226 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
229 std r10,area+EX_R10(r13); \ 227 std r10,area+EX_R10(r13); \
230 std r11,area+EX_R11(r13); \ 228 std r11,area+EX_R11(r13); \
231 std r12,area+EX_R12(r13); \ 229 std r12,area+EX_R12(r13); \
232 mfspr r9,SPRN_SPRG1; \ 230 mfspr r9,SPRN_SPRG1; \
233 std r9,area+EX_R13(r13); \ 231 std r9,area+EX_R13(r13); \
234 mfcr r9; \ 232 mfcr r9; \
235 clrrdi r12,r13,32; /* get high part of &label */ \ 233 clrrdi r12,r13,32; /* get high part of &label */ \
236 mfmsr r10; \ 234 mfmsr r10; \
237 mfspr r11,SPRN_SRR0; /* save SRR0 */ \ 235 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
238 LOAD_HANDLER(r12,label) \ 236 LOAD_HANDLER(r12,label) \
239 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ 237 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
240 mtspr SPRN_SRR0,r12; \ 238 mtspr SPRN_SRR0,r12; \
241 mfspr r12,SPRN_SRR1; /* and SRR1 */ \ 239 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
242 mtspr SPRN_SRR1,r10; \ 240 mtspr SPRN_SRR1,r10; \
243 rfid; \ 241 rfid; \
244 b . /* prevent speculative execution */ 242 b . /* prevent speculative execution */
245 243
246 /* 244 /*
247 * This is the start of the interrupt handlers for iSeries 245 * This is the start of the interrupt handlers for iSeries
248 * This code runs with relocation on. 246 * This code runs with relocation on.
249 */ 247 */
250 #define EXCEPTION_PROLOG_ISERIES_1(area) \ 248 #define EXCEPTION_PROLOG_ISERIES_1(area) \
251 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ 249 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
252 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 250 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
253 std r10,area+EX_R10(r13); \ 251 std r10,area+EX_R10(r13); \
254 std r11,area+EX_R11(r13); \ 252 std r11,area+EX_R11(r13); \
255 std r12,area+EX_R12(r13); \ 253 std r12,area+EX_R12(r13); \
256 mfspr r9,SPRN_SPRG1; \ 254 mfspr r9,SPRN_SPRG1; \
257 std r9,area+EX_R13(r13); \ 255 std r9,area+EX_R13(r13); \
258 mfcr r9 256 mfcr r9
259 257
260 #define EXCEPTION_PROLOG_ISERIES_2 \ 258 #define EXCEPTION_PROLOG_ISERIES_2 \
261 mfmsr r10; \ 259 mfmsr r10; \
262 ld r12,PACALPPACAPTR(r13); \ 260 ld r12,PACALPPACAPTR(r13); \
263 ld r11,LPPACASRR0(r12); \ 261 ld r11,LPPACASRR0(r12); \
264 ld r12,LPPACASRR1(r12); \ 262 ld r12,LPPACASRR1(r12); \
265 ori r10,r10,MSR_RI; \ 263 ori r10,r10,MSR_RI; \
266 mtmsrd r10,1 264 mtmsrd r10,1
267 265
268 /* 266 /*
269 * The common exception prolog is used for all except a few exceptions 267 * The common exception prolog is used for all except a few exceptions
270 * such as a segment miss on a kernel address. We have to be prepared 268 * such as a segment miss on a kernel address. We have to be prepared
271 * to take another exception from the point where we first touch the 269 * to take another exception from the point where we first touch the
272 * kernel stack onwards. 270 * kernel stack onwards.
273 * 271 *
274 * On entry r13 points to the paca, r9-r13 are saved in the paca, 272 * On entry r13 points to the paca, r9-r13 are saved in the paca,
275 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and 273 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
276 * SRR1, and relocation is on. 274 * SRR1, and relocation is on.
277 */ 275 */
278 #define EXCEPTION_PROLOG_COMMON(n, area) \ 276 #define EXCEPTION_PROLOG_COMMON(n, area) \
279 andi. r10,r12,MSR_PR; /* See if coming from user */ \ 277 andi. r10,r12,MSR_PR; /* See if coming from user */ \
280 mr r10,r1; /* Save r1 */ \ 278 mr r10,r1; /* Save r1 */ \
281 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ 279 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
282 beq- 1f; \ 280 beq- 1f; \
283 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ 281 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
284 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ 282 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
285 bge- cr1,bad_stack; /* abort if it is */ \ 283 bge- cr1,bad_stack; /* abort if it is */ \
286 std r9,_CCR(r1); /* save CR in stackframe */ \ 284 std r9,_CCR(r1); /* save CR in stackframe */ \
287 std r11,_NIP(r1); /* save SRR0 in stackframe */ \ 285 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
288 std r12,_MSR(r1); /* save SRR1 in stackframe */ \ 286 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
289 std r10,0(r1); /* make stack chain pointer */ \ 287 std r10,0(r1); /* make stack chain pointer */ \
290 std r0,GPR0(r1); /* save r0 in stackframe */ \ 288 std r0,GPR0(r1); /* save r0 in stackframe */ \
291 std r10,GPR1(r1); /* save r1 in stackframe */ \ 289 std r10,GPR1(r1); /* save r1 in stackframe */ \
292 ACCOUNT_CPU_USER_ENTRY(r9, r10); \ 290 ACCOUNT_CPU_USER_ENTRY(r9, r10); \
293 std r2,GPR2(r1); /* save r2 in stackframe */ \ 291 std r2,GPR2(r1); /* save r2 in stackframe */ \
294 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ 292 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
295 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ 293 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
296 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ 294 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
297 ld r10,area+EX_R10(r13); \ 295 ld r10,area+EX_R10(r13); \
298 std r9,GPR9(r1); \ 296 std r9,GPR9(r1); \
299 std r10,GPR10(r1); \ 297 std r10,GPR10(r1); \
300 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ 298 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
301 ld r10,area+EX_R12(r13); \ 299 ld r10,area+EX_R12(r13); \
302 ld r11,area+EX_R13(r13); \ 300 ld r11,area+EX_R13(r13); \
303 std r9,GPR11(r1); \ 301 std r9,GPR11(r1); \
304 std r10,GPR12(r1); \ 302 std r10,GPR12(r1); \
305 std r11,GPR13(r1); \ 303 std r11,GPR13(r1); \
306 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 304 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
307 mflr r9; /* save LR in stackframe */ \ 305 mflr r9; /* save LR in stackframe */ \
308 std r9,_LINK(r1); \ 306 std r9,_LINK(r1); \
309 mfctr r10; /* save CTR in stackframe */ \ 307 mfctr r10; /* save CTR in stackframe */ \
310 std r10,_CTR(r1); \ 308 std r10,_CTR(r1); \
309 lbz r10,PACASOFTIRQEN(r13); \
311 mfspr r11,SPRN_XER; /* save XER in stackframe */ \ 310 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
311 std r10,SOFTE(r1); \
312 std r11,_XER(r1); \ 312 std r11,_XER(r1); \
313 li r9,(n)+1; \ 313 li r9,(n)+1; \
314 std r9,_TRAP(r1); /* set trap number */ \ 314 std r9,_TRAP(r1); /* set trap number */ \
315 li r10,0; \ 315 li r10,0; \
316 ld r11,exception_marker@toc(r2); \ 316 ld r11,exception_marker@toc(r2); \
317 std r10,RESULT(r1); /* clear regs->result */ \ 317 std r10,RESULT(r1); /* clear regs->result */ \
318 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ 318 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
319 319
320 /* 320 /*
321 * Exception vectors. 321 * Exception vectors.
322 */ 322 */
323 #define STD_EXCEPTION_PSERIES(n, label) \ 323 #define STD_EXCEPTION_PSERIES(n, label) \
324 . = n; \ 324 . = n; \
325 .globl label##_pSeries; \ 325 .globl label##_pSeries; \
326 label##_pSeries: \ 326 label##_pSeries: \
327 HMT_MEDIUM; \ 327 HMT_MEDIUM; \
328 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 328 mtspr SPRN_SPRG1,r13; /* save r13 */ \
329 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 329 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
330 330
331 #define HSTD_EXCEPTION_PSERIES(n, label) \ 331 #define HSTD_EXCEPTION_PSERIES(n, label) \
332 . = n; \ 332 . = n; \
333 .globl label##_pSeries; \ 333 .globl label##_pSeries; \
334 label##_pSeries: \ 334 label##_pSeries: \
335 HMT_MEDIUM; \ 335 HMT_MEDIUM; \
336 mtspr SPRN_SPRG1,r20; /* save r20 */ \ 336 mtspr SPRN_SPRG1,r20; /* save r20 */ \
337 mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \ 337 mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
338 mtspr SPRN_SRR0,r20; \ 338 mtspr SPRN_SRR0,r20; \
339 mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \ 339 mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \
340 mtspr SPRN_SRR1,r20; \ 340 mtspr SPRN_SRR1,r20; \
341 mfspr r20,SPRN_SPRG1; /* restore r20 */ \ 341 mfspr r20,SPRN_SPRG1; /* restore r20 */ \
342 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 342 mtspr SPRN_SPRG1,r13; /* save r13 */ \
343 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 343 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
344 344
345 345
346 #define MASKABLE_EXCEPTION_PSERIES(n, label) \
347 . = n; \
348 .globl label##_pSeries; \
349 label##_pSeries: \
350 HMT_MEDIUM; \
351 mtspr SPRN_SPRG1,r13; /* save r13 */ \
352 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
353 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
354 std r10,PACA_EXGEN+EX_R10(r13); \
355 lbz r10,PACASOFTIRQEN(r13); \
356 mfcr r9; \
357 cmpwi r10,0; \
358 beq masked_interrupt; \
359 mfspr r10,SPRN_SPRG1; \
360 std r10,PACA_EXGEN+EX_R13(r13); \
361 std r11,PACA_EXGEN+EX_R11(r13); \
362 std r12,PACA_EXGEN+EX_R12(r13); \
363 clrrdi r12,r13,32; /* get high part of &label */ \
364 mfmsr r10; \
365 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
366 LOAD_HANDLER(r12,label##_common) \
367 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
368 mtspr SPRN_SRR0,r12; \
369 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
370 mtspr SPRN_SRR1,r10; \
371 rfid; \
372 b . /* prevent speculative execution */
373
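The MASKABLE_EXCEPTION_PSERIES prologue above differs from STD_EXCEPTION_PSERIES only in the PACASOFTIRQEN test it makes before committing to the handler. A minimal C sketch of that decision, assuming illustrative names (irq_soft_state and should_deliver_interrupt are not kernel identifiers):

#include <stdbool.h>

/* Stand-in for the soft-enable byte kept in the paca (illustrative only). */
struct irq_soft_state {
	unsigned char soft_enabled;	/* lazy "interrupts are enabled" flag */
};

/* The lbz/cmpwi/beq test above, in C: deliver the exception to its
 * common handler only if the kernel currently considers interrupts
 * enabled; otherwise take the masked_interrupt path instead. */
static bool should_deliver_interrupt(const struct irq_soft_state *s)
{
	return s->soft_enabled != 0;
}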
346 #define STD_EXCEPTION_ISERIES(n, label, area) \ 374 #define STD_EXCEPTION_ISERIES(n, label, area) \
347 .globl label##_iSeries; \ 375 .globl label##_iSeries; \
348 label##_iSeries: \ 376 label##_iSeries: \
349 HMT_MEDIUM; \ 377 HMT_MEDIUM; \
350 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 378 mtspr SPRN_SPRG1,r13; /* save r13 */ \
351 EXCEPTION_PROLOG_ISERIES_1(area); \ 379 EXCEPTION_PROLOG_ISERIES_1(area); \
352 EXCEPTION_PROLOG_ISERIES_2; \ 380 EXCEPTION_PROLOG_ISERIES_2; \
353 b label##_common 381 b label##_common
354 382
355 #define MASKABLE_EXCEPTION_ISERIES(n, label) \ 383 #define MASKABLE_EXCEPTION_ISERIES(n, label) \
356 .globl label##_iSeries; \ 384 .globl label##_iSeries; \
357 label##_iSeries: \ 385 label##_iSeries: \
358 HMT_MEDIUM; \ 386 HMT_MEDIUM; \
359 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 387 mtspr SPRN_SPRG1,r13; /* save r13 */ \
360 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ 388 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
361 lbz r10,PACAPROCENABLED(r13); \ 389 lbz r10,PACASOFTIRQEN(r13); \
362 cmpwi 0,r10,0; \ 390 cmpwi 0,r10,0; \
363 beq- label##_iSeries_masked; \ 391 beq- label##_iSeries_masked; \
364 EXCEPTION_PROLOG_ISERIES_2; \ 392 EXCEPTION_PROLOG_ISERIES_2; \
365 b label##_common; \ 393 b label##_common; \
366 394
367 #ifdef DO_SOFT_DISABLE 395 #ifdef CONFIG_PPC_ISERIES
368 #define DISABLE_INTS \ 396 #define DISABLE_INTS \
369 BEGIN_FW_FTR_SECTION; \
370 lbz r10,PACAPROCENABLED(r13); \
371 li r11,0; \ 397 li r11,0; \
372 std r10,SOFTE(r1); \ 398 stb r11,PACASOFTIRQEN(r13); \
399 BEGIN_FW_FTR_SECTION; \
400 stb r11,PACAHARDIRQEN(r13); \
401 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
402 BEGIN_FW_FTR_SECTION; \
373 mfmsr r10; \ 403 mfmsr r10; \
374 stb r11,PACAPROCENABLED(r13); \
375 ori r10,r10,MSR_EE; \ 404 ori r10,r10,MSR_EE; \
376 mtmsrd r10,1; \ 405 mtmsrd r10,1; \
377 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 406 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
378 407
379 #define ENABLE_INTS \ 408 #else
380 BEGIN_FW_FTR_SECTION; \ 409 #define DISABLE_INTS \
381 lbz r10,PACAPROCENABLED(r13); \ 410 li r11,0; \
382 mfmsr r11; \ 411 stb r11,PACASOFTIRQEN(r13); \
383 std r10,SOFTE(r1); \ 412 stb r11,PACAHARDIRQEN(r13)
384 ori r11,r11,MSR_EE; \
385 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \
386 BEGIN_FW_FTR_SECTION; \
387 ld r12,_MSR(r1); \
388 mfmsr r11; \
389 rlwimi r11,r12,0,MSR_EE; \
390 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
391 mtmsrd r11,1
392 413
393 #else /* hard enable/disable interrupts */ 414 #endif /* CONFIG_PPC_ISERIES */
394 #define DISABLE_INTS
395 415
396 #define ENABLE_INTS \ 416 #define ENABLE_INTS \
397 ld r12,_MSR(r1); \ 417 ld r12,_MSR(r1); \
398 mfmsr r11; \ 418 mfmsr r11; \
399 rlwimi r11,r12,0,MSR_EE; \ 419 rlwimi r11,r12,0,MSR_EE; \
400 mtmsrd r11,1 420 mtmsrd r11,1
401 421
402 #endif
403
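With the lazy scheme, DISABLE_INTS above no longer touches MSR_EE outside of iSeries; it only clears the two paca bytes, while ENABLE_INTS copies the EE bit back from the saved MSR. A minimal C sketch of the convention these macros rely on, assuming illustrative names (paca_irq_state, lazy_irq_disable, lazy_irq_enable and hard_enable_ee are not the kernel's interfaces):

/* Illustrative sketch of the lazy-disable convention, not kernel code. */
struct paca_irq_state {
	unsigned char soft_enabled;	/* what the kernel believes */
	unsigned char hard_enabled;	/* whether MSR_EE is actually set */
};

static void hard_enable_ee(void)
{
	/* on real hardware this would be mfmsr / ori with MSR_EE / mtmsrd */
}

static void lazy_irq_disable(struct paca_irq_state *p)
{
	p->soft_enabled = 0;		/* no mtmsrd: MSR_EE is left alone */
}

static void lazy_irq_enable(struct paca_irq_state *p)
{
	p->soft_enabled = 1;
	if (!p->hard_enabled) {
		/* an interrupt arrived while we were soft-disabled and
		 * turned MSR_EE off; turn it back on so the interrupt
		 * can now be taken */
		p->hard_enabled = 1;
		hard_enable_ee();
	}
}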
404 #define STD_EXCEPTION_COMMON(trap, label, hdlr) \ 422 #define STD_EXCEPTION_COMMON(trap, label, hdlr) \
405 .align 7; \ 423 .align 7; \
406 .globl label##_common; \ 424 .globl label##_common; \
407 label##_common: \ 425 label##_common: \
408 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ 426 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
409 DISABLE_INTS; \ 427 DISABLE_INTS; \
410 bl .save_nvgprs; \ 428 bl .save_nvgprs; \
411 addi r3,r1,STACK_FRAME_OVERHEAD; \ 429 addi r3,r1,STACK_FRAME_OVERHEAD; \
412 bl hdlr; \ 430 bl hdlr; \
413 b .ret_from_except 431 b .ret_from_except
414 432
415 /* 433 /*
416 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur 434 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
417 * in the idle task and therefore need the special idle handling. 435 * in the idle task and therefore need the special idle handling.
418 */ 436 */
419 #define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \ 437 #define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
420 .align 7; \ 438 .align 7; \
421 .globl label##_common; \ 439 .globl label##_common; \
422 label##_common: \ 440 label##_common: \
423 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ 441 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
424 FINISH_NAP; \ 442 FINISH_NAP; \
425 DISABLE_INTS; \ 443 DISABLE_INTS; \
426 bl .save_nvgprs; \ 444 bl .save_nvgprs; \
427 addi r3,r1,STACK_FRAME_OVERHEAD; \ 445 addi r3,r1,STACK_FRAME_OVERHEAD; \
428 bl hdlr; \ 446 bl hdlr; \
429 b .ret_from_except 447 b .ret_from_except
430 448
431 #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ 449 #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
432 .align 7; \ 450 .align 7; \
433 .globl label##_common; \ 451 .globl label##_common; \
434 label##_common: \ 452 label##_common: \
435 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ 453 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
436 FINISH_NAP; \ 454 FINISH_NAP; \
437 DISABLE_INTS; \ 455 DISABLE_INTS; \
438 bl .ppc64_runlatch_on; \ 456 bl .ppc64_runlatch_on; \
439 addi r3,r1,STACK_FRAME_OVERHEAD; \ 457 addi r3,r1,STACK_FRAME_OVERHEAD; \
440 bl hdlr; \ 458 bl hdlr; \
441 b .ret_from_except_lite 459 b .ret_from_except_lite
442 460
443 /* 461 /*
444 * When the idle code in power4_idle puts the CPU into NAP mode, 462 * When the idle code in power4_idle puts the CPU into NAP mode,
445 * it has to do so in a loop, and relies on the external interrupt 463 * it has to do so in a loop, and relies on the external interrupt
446 * and decrementer interrupt entry code to get it out of the loop. 464 * and decrementer interrupt entry code to get it out of the loop.
447 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags 465 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
448 * to signal that it is in the loop and needs help to get out. 466 * to signal that it is in the loop and needs help to get out.
449 */ 467 */
450 #ifdef CONFIG_PPC_970_NAP 468 #ifdef CONFIG_PPC_970_NAP
451 #define FINISH_NAP \ 469 #define FINISH_NAP \
452 BEGIN_FTR_SECTION \ 470 BEGIN_FTR_SECTION \
453 clrrdi r11,r1,THREAD_SHIFT; \ 471 clrrdi r11,r1,THREAD_SHIFT; \
454 ld r9,TI_LOCAL_FLAGS(r11); \ 472 ld r9,TI_LOCAL_FLAGS(r11); \
455 andi. r10,r9,_TLF_NAPPING; \ 473 andi. r10,r9,_TLF_NAPPING; \
456 bnel power4_fixup_nap; \ 474 bnel power4_fixup_nap; \
457 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) 475 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
458 #else 476 #else
459 #define FINISH_NAP 477 #define FINISH_NAP
460 #endif 478 #endif
461 479
462 /* 480 /*
463 * Start of pSeries system interrupt routines 481 * Start of pSeries system interrupt routines
464 */ 482 */
465 . = 0x100 483 . = 0x100
466 .globl __start_interrupts 484 .globl __start_interrupts
467 __start_interrupts: 485 __start_interrupts:
468 486
469 STD_EXCEPTION_PSERIES(0x100, system_reset) 487 STD_EXCEPTION_PSERIES(0x100, system_reset)
470 488
471 . = 0x200 489 . = 0x200
472 _machine_check_pSeries: 490 _machine_check_pSeries:
473 HMT_MEDIUM 491 HMT_MEDIUM
474 mtspr SPRN_SPRG1,r13 /* save r13 */ 492 mtspr SPRN_SPRG1,r13 /* save r13 */
475 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 493 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
476 494
477 . = 0x300 495 . = 0x300
478 .globl data_access_pSeries 496 .globl data_access_pSeries
479 data_access_pSeries: 497 data_access_pSeries:
480 HMT_MEDIUM 498 HMT_MEDIUM
481 mtspr SPRN_SPRG1,r13 499 mtspr SPRN_SPRG1,r13
482 BEGIN_FTR_SECTION 500 BEGIN_FTR_SECTION
483 mtspr SPRN_SPRG2,r12 501 mtspr SPRN_SPRG2,r12
484 mfspr r13,SPRN_DAR 502 mfspr r13,SPRN_DAR
485 mfspr r12,SPRN_DSISR 503 mfspr r12,SPRN_DSISR
486 srdi r13,r13,60 504 srdi r13,r13,60
487 rlwimi r13,r12,16,0x20 505 rlwimi r13,r12,16,0x20
488 mfcr r12 506 mfcr r12
489 cmpwi r13,0x2c 507 cmpwi r13,0x2c
490 beq .do_stab_bolted_pSeries 508 beq .do_stab_bolted_pSeries
491 mtcrf 0x80,r12 509 mtcrf 0x80,r12
492 mfspr r12,SPRN_SPRG2 510 mfspr r12,SPRN_SPRG2
493 END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 511 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
494 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) 512 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
495 513
496 . = 0x380 514 . = 0x380
497 .globl data_access_slb_pSeries 515 .globl data_access_slb_pSeries
498 data_access_slb_pSeries: 516 data_access_slb_pSeries:
499 HMT_MEDIUM 517 HMT_MEDIUM
500 mtspr SPRN_SPRG1,r13 518 mtspr SPRN_SPRG1,r13
501 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 519 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
502 std r3,PACA_EXSLB+EX_R3(r13) 520 std r3,PACA_EXSLB+EX_R3(r13)
503 mfspr r3,SPRN_DAR 521 mfspr r3,SPRN_DAR
504 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 522 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
505 mfcr r9 523 mfcr r9
506 #ifdef __DISABLED__ 524 #ifdef __DISABLED__
507 /* Keep that around for when we re-implement dynamic VSIDs */ 525 /* Keep that around for when we re-implement dynamic VSIDs */
508 cmpdi r3,0 526 cmpdi r3,0
509 bge slb_miss_user_pseries 527 bge slb_miss_user_pseries
510 #endif /* __DISABLED__ */ 528 #endif /* __DISABLED__ */
511 std r10,PACA_EXSLB+EX_R10(r13) 529 std r10,PACA_EXSLB+EX_R10(r13)
512 std r11,PACA_EXSLB+EX_R11(r13) 530 std r11,PACA_EXSLB+EX_R11(r13)
513 std r12,PACA_EXSLB+EX_R12(r13) 531 std r12,PACA_EXSLB+EX_R12(r13)
514 mfspr r10,SPRN_SPRG1 532 mfspr r10,SPRN_SPRG1
515 std r10,PACA_EXSLB+EX_R13(r13) 533 std r10,PACA_EXSLB+EX_R13(r13)
516 mfspr r12,SPRN_SRR1 /* and SRR1 */ 534 mfspr r12,SPRN_SRR1 /* and SRR1 */
517 b .slb_miss_realmode /* Rel. branch works in real mode */ 535 b .slb_miss_realmode /* Rel. branch works in real mode */
518 536
519 STD_EXCEPTION_PSERIES(0x400, instruction_access) 537 STD_EXCEPTION_PSERIES(0x400, instruction_access)
520 538
521 . = 0x480 539 . = 0x480
522 .globl instruction_access_slb_pSeries 540 .globl instruction_access_slb_pSeries
523 instruction_access_slb_pSeries: 541 instruction_access_slb_pSeries:
524 HMT_MEDIUM 542 HMT_MEDIUM
525 mtspr SPRN_SPRG1,r13 543 mtspr SPRN_SPRG1,r13
526 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 544 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
527 std r3,PACA_EXSLB+EX_R3(r13) 545 std r3,PACA_EXSLB+EX_R3(r13)
528 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ 546 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
529 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 547 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
530 mfcr r9 548 mfcr r9
531 #ifdef __DISABLED__ 549 #ifdef __DISABLED__
532 /* Keep that around for when we re-implement dynamic VSIDs */ 550 /* Keep that around for when we re-implement dynamic VSIDs */
533 cmpdi r3,0 551 cmpdi r3,0
534 bge slb_miss_user_pseries 552 bge slb_miss_user_pseries
535 #endif /* __DISABLED__ */ 553 #endif /* __DISABLED__ */
536 std r10,PACA_EXSLB+EX_R10(r13) 554 std r10,PACA_EXSLB+EX_R10(r13)
537 std r11,PACA_EXSLB+EX_R11(r13) 555 std r11,PACA_EXSLB+EX_R11(r13)
538 std r12,PACA_EXSLB+EX_R12(r13) 556 std r12,PACA_EXSLB+EX_R12(r13)
539 mfspr r10,SPRN_SPRG1 557 mfspr r10,SPRN_SPRG1
540 std r10,PACA_EXSLB+EX_R13(r13) 558 std r10,PACA_EXSLB+EX_R13(r13)
541 mfspr r12,SPRN_SRR1 /* and SRR1 */ 559 mfspr r12,SPRN_SRR1 /* and SRR1 */
542 b .slb_miss_realmode /* Rel. branch works in real mode */ 560 b .slb_miss_realmode /* Rel. branch works in real mode */
543 561
544 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 562 MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
545 STD_EXCEPTION_PSERIES(0x600, alignment) 563 STD_EXCEPTION_PSERIES(0x600, alignment)
546 STD_EXCEPTION_PSERIES(0x700, program_check) 564 STD_EXCEPTION_PSERIES(0x700, program_check)
547 STD_EXCEPTION_PSERIES(0x800, fp_unavailable) 565 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
548 STD_EXCEPTION_PSERIES(0x900, decrementer) 566 MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
549 STD_EXCEPTION_PSERIES(0xa00, trap_0a) 567 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
550 STD_EXCEPTION_PSERIES(0xb00, trap_0b) 568 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
551 569
552 . = 0xc00 570 . = 0xc00
553 .globl system_call_pSeries 571 .globl system_call_pSeries
554 system_call_pSeries: 572 system_call_pSeries:
555 HMT_MEDIUM 573 HMT_MEDIUM
556 mr r9,r13 574 mr r9,r13
557 mfmsr r10 575 mfmsr r10
558 mfspr r13,SPRN_SPRG3 576 mfspr r13,SPRN_SPRG3
559 mfspr r11,SPRN_SRR0 577 mfspr r11,SPRN_SRR0
560 clrrdi r12,r13,32 578 clrrdi r12,r13,32
561 oris r12,r12,system_call_common@h 579 oris r12,r12,system_call_common@h
562 ori r12,r12,system_call_common@l 580 ori r12,r12,system_call_common@l
563 mtspr SPRN_SRR0,r12 581 mtspr SPRN_SRR0,r12
564 ori r10,r10,MSR_IR|MSR_DR|MSR_RI 582 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
565 mfspr r12,SPRN_SRR1 583 mfspr r12,SPRN_SRR1
566 mtspr SPRN_SRR1,r10 584 mtspr SPRN_SRR1,r10
567 rfid 585 rfid
568 b . /* prevent speculative execution */ 586 b . /* prevent speculative execution */
569 587
570 STD_EXCEPTION_PSERIES(0xd00, single_step) 588 STD_EXCEPTION_PSERIES(0xd00, single_step)
571 STD_EXCEPTION_PSERIES(0xe00, trap_0e) 589 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
572 590
573 /* We need to deal with the Altivec unavailable exception 591 /* We need to deal with the Altivec unavailable exception
574 * here which is at 0xf20, thus in the middle of the 592 * here which is at 0xf20, thus in the middle of the
575 * prolog code of the PerformanceMonitor one. A little 593 * prolog code of the PerformanceMonitor one. A little
576 * trickery is thus necessary 594 * trickery is thus necessary
577 */ 595 */
578 . = 0xf00 596 . = 0xf00
579 b performance_monitor_pSeries 597 b performance_monitor_pSeries
580 598
581 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) 599 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
582 600
583 #ifdef CONFIG_CBE_RAS 601 #ifdef CONFIG_CBE_RAS
584 HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) 602 HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
585 #endif /* CONFIG_CBE_RAS */ 603 #endif /* CONFIG_CBE_RAS */
586 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) 604 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
587 #ifdef CONFIG_CBE_RAS 605 #ifdef CONFIG_CBE_RAS
588 HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) 606 HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
589 #endif /* CONFIG_CBE_RAS */ 607 #endif /* CONFIG_CBE_RAS */
590 STD_EXCEPTION_PSERIES(0x1700, altivec_assist) 608 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
591 #ifdef CONFIG_CBE_RAS 609 #ifdef CONFIG_CBE_RAS
592 HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) 610 HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
593 #endif /* CONFIG_CBE_RAS */ 611 #endif /* CONFIG_CBE_RAS */
594 612
595 . = 0x3000 613 . = 0x3000
596 614
597 /*** pSeries interrupt support ***/ 615 /*** pSeries interrupt support ***/
598 616
599 /* moved from 0xf00 */ 617 /* moved from 0xf00 */
600 STD_EXCEPTION_PSERIES(., performance_monitor) 618 MASKABLE_EXCEPTION_PSERIES(., performance_monitor)
601 619
620 /*
621 * An interrupt came in while soft-disabled; clear EE in SRR1,
622 * clear paca->hard_enabled and return.
623 */
624 masked_interrupt:
625 stb r10,PACAHARDIRQEN(r13)
626 mtcrf 0x80,r9
627 ld r9,PACA_EXGEN+EX_R9(r13)
628 mfspr r10,SPRN_SRR1
629 rldicl r10,r10,48,1 /* clear MSR_EE */
630 rotldi r10,r10,16
631 mtspr SPRN_SRR1,r10
632 ld r10,PACA_EXGEN+EX_R10(r13)
633 mfspr r13,SPRN_SPRG1
634 rfid
635 b .
636
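masked_interrupt above is the other half of the lazy scheme: instead of delivering the interrupt it records that the processor is now hard-disabled and returns with EE cleared in SRR1 (the rldicl/rotldi pair is simply "clear MSR_EE" spread over two rotates). A C-level sketch of its effect, with illustrative names:

#define MSR_EE_BIT	0x8000UL	/* external-interrupt enable bit in the MSR */

/* Illustrative stand-in for the state masked_interrupt updates. */
struct masked_state {
	unsigned char hard_enabled;	/* paca->hard_enabled */
	unsigned long srr1;		/* MSR image that rfid will restore */
};

/* Nothing is delivered here: the device or decrementer interrupt stays
 * pending in hardware and fires again once MSR_EE is re-enabled by the
 * soft-enable path. */
static void mask_pending_interrupt(struct masked_state *s)
{
	s->hard_enabled = 0;		/* stb r10,PACAHARDIRQEN(r13); r10 is 0 here */
	s->srr1 &= ~MSR_EE_BIT;		/* return with interrupts hard-disabled */
}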
602 .align 7 637 .align 7
603 _GLOBAL(do_stab_bolted_pSeries) 638 _GLOBAL(do_stab_bolted_pSeries)
604 mtcrf 0x80,r12 639 mtcrf 0x80,r12
605 mfspr r12,SPRN_SPRG2 640 mfspr r12,SPRN_SPRG2
606 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 641 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
607 642
608 /* 643 /*
609 * We have some room here; we use it to put 644 * We have some room here; we use it to put
610 * the pSeries SLB miss user trampoline code so it's reasonably 645 * the pSeries SLB miss user trampoline code so it's reasonably
611 * far away from slb_miss_user_common, to avoid problems with rfid 646 * far away from slb_miss_user_common, to avoid problems with rfid
612 * 647 *
613 * This is used for when the SLB miss handler has to go virtual, 648 * This is used for when the SLB miss handler has to go virtual,
614 * which doesn't happen at the moment but will once we re-implement 649 * which doesn't happen at the moment but will once we re-implement
615 * dynamic VSIDs for shared page tables 650 * dynamic VSIDs for shared page tables
616 */ 651 */
617 #ifdef __DISABLED__ 652 #ifdef __DISABLED__
618 slb_miss_user_pseries: 653 slb_miss_user_pseries:
619 std r10,PACA_EXGEN+EX_R10(r13) 654 std r10,PACA_EXGEN+EX_R10(r13)
620 std r11,PACA_EXGEN+EX_R11(r13) 655 std r11,PACA_EXGEN+EX_R11(r13)
621 std r12,PACA_EXGEN+EX_R12(r13) 656 std r12,PACA_EXGEN+EX_R12(r13)
622 mfspr r10,SPRG1 657 mfspr r10,SPRG1
623 ld r11,PACA_EXSLB+EX_R9(r13) 658 ld r11,PACA_EXSLB+EX_R9(r13)
624 ld r12,PACA_EXSLB+EX_R3(r13) 659 ld r12,PACA_EXSLB+EX_R3(r13)
625 std r10,PACA_EXGEN+EX_R13(r13) 660 std r10,PACA_EXGEN+EX_R13(r13)
626 std r11,PACA_EXGEN+EX_R9(r13) 661 std r11,PACA_EXGEN+EX_R9(r13)
627 std r12,PACA_EXGEN+EX_R3(r13) 662 std r12,PACA_EXGEN+EX_R3(r13)
628 clrrdi r12,r13,32 663 clrrdi r12,r13,32
629 mfmsr r10 664 mfmsr r10
630 mfspr r11,SRR0 /* save SRR0 */ 665 mfspr r11,SRR0 /* save SRR0 */
631 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ 666 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
632 ori r10,r10,MSR_IR|MSR_DR|MSR_RI 667 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
633 mtspr SRR0,r12 668 mtspr SRR0,r12
634 mfspr r12,SRR1 /* and SRR1 */ 669 mfspr r12,SRR1 /* and SRR1 */
635 mtspr SRR1,r10 670 mtspr SRR1,r10
636 rfid 671 rfid
637 b . /* prevent spec. execution */ 672 b . /* prevent spec. execution */
638 #endif /* __DISABLED__ */ 673 #endif /* __DISABLED__ */
639 674
640 /* 675 /*
641 * Vectors for the FWNMI option. Share common code. 676 * Vectors for the FWNMI option. Share common code.
642 */ 677 */
643 .globl system_reset_fwnmi 678 .globl system_reset_fwnmi
644 .align 7 679 .align 7
645 system_reset_fwnmi: 680 system_reset_fwnmi:
646 HMT_MEDIUM 681 HMT_MEDIUM
647 mtspr SPRN_SPRG1,r13 /* save r13 */ 682 mtspr SPRN_SPRG1,r13 /* save r13 */
648 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common) 683 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)
649 684
650 .globl machine_check_fwnmi 685 .globl machine_check_fwnmi
651 .align 7 686 .align 7
652 machine_check_fwnmi: 687 machine_check_fwnmi:
653 HMT_MEDIUM 688 HMT_MEDIUM
654 mtspr SPRN_SPRG1,r13 /* save r13 */ 689 mtspr SPRN_SPRG1,r13 /* save r13 */
655 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common) 690 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
656 691
657 #ifdef CONFIG_PPC_ISERIES 692 #ifdef CONFIG_PPC_ISERIES
658 /*** ISeries-LPAR interrupt handlers ***/ 693 /*** ISeries-LPAR interrupt handlers ***/
659 694
660 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) 695 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
661 696
662 .globl data_access_iSeries 697 .globl data_access_iSeries
663 data_access_iSeries: 698 data_access_iSeries:
664 mtspr SPRN_SPRG1,r13 699 mtspr SPRN_SPRG1,r13
665 BEGIN_FTR_SECTION 700 BEGIN_FTR_SECTION
666 mtspr SPRN_SPRG2,r12 701 mtspr SPRN_SPRG2,r12
667 mfspr r13,SPRN_DAR 702 mfspr r13,SPRN_DAR
668 mfspr r12,SPRN_DSISR 703 mfspr r12,SPRN_DSISR
669 srdi r13,r13,60 704 srdi r13,r13,60
670 rlwimi r13,r12,16,0x20 705 rlwimi r13,r12,16,0x20
671 mfcr r12 706 mfcr r12
672 cmpwi r13,0x2c 707 cmpwi r13,0x2c
673 beq .do_stab_bolted_iSeries 708 beq .do_stab_bolted_iSeries
674 mtcrf 0x80,r12 709 mtcrf 0x80,r12
675 mfspr r12,SPRN_SPRG2 710 mfspr r12,SPRN_SPRG2
676 END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 711 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
677 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) 712 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
678 EXCEPTION_PROLOG_ISERIES_2 713 EXCEPTION_PROLOG_ISERIES_2
679 b data_access_common 714 b data_access_common
680 715
681 .do_stab_bolted_iSeries: 716 .do_stab_bolted_iSeries:
682 mtcrf 0x80,r12 717 mtcrf 0x80,r12
683 mfspr r12,SPRN_SPRG2 718 mfspr r12,SPRN_SPRG2
684 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 719 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
685 EXCEPTION_PROLOG_ISERIES_2 720 EXCEPTION_PROLOG_ISERIES_2
686 b .do_stab_bolted 721 b .do_stab_bolted
687 722
688 .globl data_access_slb_iSeries 723 .globl data_access_slb_iSeries
689 data_access_slb_iSeries: 724 data_access_slb_iSeries:
690 mtspr SPRN_SPRG1,r13 /* save r13 */ 725 mtspr SPRN_SPRG1,r13 /* save r13 */
691 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 726 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
692 std r3,PACA_EXSLB+EX_R3(r13) 727 std r3,PACA_EXSLB+EX_R3(r13)
693 mfspr r3,SPRN_DAR 728 mfspr r3,SPRN_DAR
694 std r9,PACA_EXSLB+EX_R9(r13) 729 std r9,PACA_EXSLB+EX_R9(r13)
695 mfcr r9 730 mfcr r9
696 #ifdef __DISABLED__ 731 #ifdef __DISABLED__
697 cmpdi r3,0 732 cmpdi r3,0
698 bge slb_miss_user_iseries 733 bge slb_miss_user_iseries
699 #endif 734 #endif
700 std r10,PACA_EXSLB+EX_R10(r13) 735 std r10,PACA_EXSLB+EX_R10(r13)
701 std r11,PACA_EXSLB+EX_R11(r13) 736 std r11,PACA_EXSLB+EX_R11(r13)
702 std r12,PACA_EXSLB+EX_R12(r13) 737 std r12,PACA_EXSLB+EX_R12(r13)
703 mfspr r10,SPRN_SPRG1 738 mfspr r10,SPRN_SPRG1
704 std r10,PACA_EXSLB+EX_R13(r13) 739 std r10,PACA_EXSLB+EX_R13(r13)
705 ld r12,PACALPPACAPTR(r13) 740 ld r12,PACALPPACAPTR(r13)
706 ld r12,LPPACASRR1(r12) 741 ld r12,LPPACASRR1(r12)
707 b .slb_miss_realmode 742 b .slb_miss_realmode
708 743
709 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) 744 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
710 745
711 .globl instruction_access_slb_iSeries 746 .globl instruction_access_slb_iSeries
712 instruction_access_slb_iSeries: 747 instruction_access_slb_iSeries:
713 mtspr SPRN_SPRG1,r13 /* save r13 */ 748 mtspr SPRN_SPRG1,r13 /* save r13 */
714 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ 749 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
715 std r3,PACA_EXSLB+EX_R3(r13) 750 std r3,PACA_EXSLB+EX_R3(r13)
716 ld r3,PACALPPACAPTR(r13) 751 ld r3,PACALPPACAPTR(r13)
717 ld r3,LPPACASRR0(r3) /* get SRR0 value */ 752 ld r3,LPPACASRR0(r3) /* get SRR0 value */
718 std r9,PACA_EXSLB+EX_R9(r13) 753 std r9,PACA_EXSLB+EX_R9(r13)
719 mfcr r9 754 mfcr r9
720 #ifdef __DISABLED__ 755 #ifdef __DISABLED__
721 cmpdi r3,0 756 cmpdi r3,0
722 bge .slb_miss_user_iseries 757 bge .slb_miss_user_iseries
723 #endif 758 #endif
724 std r10,PACA_EXSLB+EX_R10(r13) 759 std r10,PACA_EXSLB+EX_R10(r13)
725 std r11,PACA_EXSLB+EX_R11(r13) 760 std r11,PACA_EXSLB+EX_R11(r13)
726 std r12,PACA_EXSLB+EX_R12(r13) 761 std r12,PACA_EXSLB+EX_R12(r13)
727 mfspr r10,SPRN_SPRG1 762 mfspr r10,SPRN_SPRG1
728 std r10,PACA_EXSLB+EX_R13(r13) 763 std r10,PACA_EXSLB+EX_R13(r13)
729 ld r12,PACALPPACAPTR(r13) 764 ld r12,PACALPPACAPTR(r13)
730 ld r12,LPPACASRR1(r12) 765 ld r12,LPPACASRR1(r12)
731 b .slb_miss_realmode 766 b .slb_miss_realmode
732 767
733 #ifdef __DISABLED__ 768 #ifdef __DISABLED__
734 slb_miss_user_iseries: 769 slb_miss_user_iseries:
735 std r10,PACA_EXGEN+EX_R10(r13) 770 std r10,PACA_EXGEN+EX_R10(r13)
736 std r11,PACA_EXGEN+EX_R11(r13) 771 std r11,PACA_EXGEN+EX_R11(r13)
737 std r12,PACA_EXGEN+EX_R12(r13) 772 std r12,PACA_EXGEN+EX_R12(r13)
738 mfspr r10,SPRG1 773 mfspr r10,SPRG1
739 ld r11,PACA_EXSLB+EX_R9(r13) 774 ld r11,PACA_EXSLB+EX_R9(r13)
740 ld r12,PACA_EXSLB+EX_R3(r13) 775 ld r12,PACA_EXSLB+EX_R3(r13)
741 std r10,PACA_EXGEN+EX_R13(r13) 776 std r10,PACA_EXGEN+EX_R13(r13)
742 std r11,PACA_EXGEN+EX_R9(r13) 777 std r11,PACA_EXGEN+EX_R9(r13)
743 std r12,PACA_EXGEN+EX_R3(r13) 778 std r12,PACA_EXGEN+EX_R3(r13)
744 EXCEPTION_PROLOG_ISERIES_2 779 EXCEPTION_PROLOG_ISERIES_2
745 b slb_miss_user_common 780 b slb_miss_user_common
746 #endif 781 #endif
747 782
748 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt) 783 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
749 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN) 784 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
750 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN) 785 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
751 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN) 786 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
752 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer) 787 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
753 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN) 788 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
754 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN) 789 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
755 790
756 .globl system_call_iSeries 791 .globl system_call_iSeries
757 system_call_iSeries: 792 system_call_iSeries:
758 mr r9,r13 793 mr r9,r13
759 mfspr r13,SPRN_SPRG3 794 mfspr r13,SPRN_SPRG3
760 EXCEPTION_PROLOG_ISERIES_2 795 EXCEPTION_PROLOG_ISERIES_2
761 b system_call_common 796 b system_call_common
762 797
763 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN) 798 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
764 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN) 799 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
765 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN) 800 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
766 801
767 .globl system_reset_iSeries 802 .globl system_reset_iSeries
768 system_reset_iSeries: 803 system_reset_iSeries:
769 mfspr r13,SPRN_SPRG3 /* Get paca address */ 804 mfspr r13,SPRN_SPRG3 /* Get paca address */
770 mfmsr r24 805 mfmsr r24
771 ori r24,r24,MSR_RI 806 ori r24,r24,MSR_RI
772 mtmsrd r24 /* RI on */ 807 mtmsrd r24 /* RI on */
773 lhz r24,PACAPACAINDEX(r13) /* Get processor # */ 808 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
774 cmpwi 0,r24,0 /* Are we processor 0? */ 809 cmpwi 0,r24,0 /* Are we processor 0? */
775 beq .__start_initialization_iSeries /* Start up the first processor */ 810 beq .__start_initialization_iSeries /* Start up the first processor */
776 mfspr r4,SPRN_CTRLF 811 mfspr r4,SPRN_CTRLF
777 li r5,CTRL_RUNLATCH /* Turn off the run light */ 812 li r5,CTRL_RUNLATCH /* Turn off the run light */
778 andc r4,r4,r5 813 andc r4,r4,r5
779 mtspr SPRN_CTRLT,r4 814 mtspr SPRN_CTRLT,r4
780 815
781 1: 816 1:
782 HMT_LOW 817 HMT_LOW
783 #ifdef CONFIG_SMP 818 #ifdef CONFIG_SMP
784 lbz r23,PACAPROCSTART(r13) /* Test if this processor 819 lbz r23,PACAPROCSTART(r13) /* Test if this processor
785 * should start */ 820 * should start */
786 sync 821 sync
787 LOAD_REG_IMMEDIATE(r3,current_set) 822 LOAD_REG_IMMEDIATE(r3,current_set)
788 sldi r28,r24,3 /* get current_set[cpu#] */ 823 sldi r28,r24,3 /* get current_set[cpu#] */
789 ldx r3,r3,r28 824 ldx r3,r3,r28
790 addi r1,r3,THREAD_SIZE 825 addi r1,r3,THREAD_SIZE
791 subi r1,r1,STACK_FRAME_OVERHEAD 826 subi r1,r1,STACK_FRAME_OVERHEAD
792 827
793 cmpwi 0,r23,0 828 cmpwi 0,r23,0
794 beq iSeries_secondary_smp_loop /* Loop until told to go */ 829 beq iSeries_secondary_smp_loop /* Loop until told to go */
795 bne .__secondary_start /* Loop until told to go */ 830 bne .__secondary_start /* Loop until told to go */
796 iSeries_secondary_smp_loop: 831 iSeries_secondary_smp_loop:
797 /* Let the Hypervisor know we are alive */ 832 /* Let the Hypervisor know we are alive */
798 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ 833 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
799 lis r3,0x8002 834 lis r3,0x8002
800 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ 835 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
801 #else /* CONFIG_SMP */ 836 #else /* CONFIG_SMP */
802 /* Yield the processor. This is required for non-SMP kernels 837 /* Yield the processor. This is required for non-SMP kernels
803 which are running on multi-threaded machines. */ 838 which are running on multi-threaded machines. */
804 lis r3,0x8000 839 lis r3,0x8000
805 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ 840 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
806 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ 841 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
807 li r4,0 /* "yield timed" */ 842 li r4,0 /* "yield timed" */
808 li r5,-1 /* "yield forever" */ 843 li r5,-1 /* "yield forever" */
809 #endif /* CONFIG_SMP */ 844 #endif /* CONFIG_SMP */
810 li r0,-1 /* r0=-1 indicates a Hypervisor call */ 845 li r0,-1 /* r0=-1 indicates a Hypervisor call */
811 sc /* Invoke the hypervisor via a system call */ 846 sc /* Invoke the hypervisor via a system call */
812 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */ 847 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
813 b 1b /* If SMP not configured, secondaries 848 b 1b /* If SMP not configured, secondaries
814 * loop forever */ 849 * loop forever */
815 850
816 .globl decrementer_iSeries_masked 851 .globl decrementer_iSeries_masked
817 decrementer_iSeries_masked: 852 decrementer_iSeries_masked:
818 /* We may not have a valid TOC pointer in here. */ 853 /* We may not have a valid TOC pointer in here. */
819 li r11,1 854 li r11,1
820 ld r12,PACALPPACAPTR(r13) 855 ld r12,PACALPPACAPTR(r13)
821 stb r11,LPPACADECRINT(r12) 856 stb r11,LPPACADECRINT(r12)
822 LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy) 857 LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
823 lwz r12,0(r12) 858 lwz r12,0(r12)
824 mtspr SPRN_DEC,r12 859 mtspr SPRN_DEC,r12
825 /* fall through */ 860 /* fall through */
826 861
827 .globl hardware_interrupt_iSeries_masked 862 .globl hardware_interrupt_iSeries_masked
828 hardware_interrupt_iSeries_masked: 863 hardware_interrupt_iSeries_masked:
829 mtcrf 0x80,r9 /* Restore regs */ 864 mtcrf 0x80,r9 /* Restore regs */
830 ld r12,PACALPPACAPTR(r13) 865 ld r12,PACALPPACAPTR(r13)
831 ld r11,LPPACASRR0(r12) 866 ld r11,LPPACASRR0(r12)
832 ld r12,LPPACASRR1(r12) 867 ld r12,LPPACASRR1(r12)
833 mtspr SPRN_SRR0,r11 868 mtspr SPRN_SRR0,r11
834 mtspr SPRN_SRR1,r12 869 mtspr SPRN_SRR1,r12
835 ld r9,PACA_EXGEN+EX_R9(r13) 870 ld r9,PACA_EXGEN+EX_R9(r13)
836 ld r10,PACA_EXGEN+EX_R10(r13) 871 ld r10,PACA_EXGEN+EX_R10(r13)
837 ld r11,PACA_EXGEN+EX_R11(r13) 872 ld r11,PACA_EXGEN+EX_R11(r13)
838 ld r12,PACA_EXGEN+EX_R12(r13) 873 ld r12,PACA_EXGEN+EX_R12(r13)
839 ld r13,PACA_EXGEN+EX_R13(r13) 874 ld r13,PACA_EXGEN+EX_R13(r13)
840 rfid 875 rfid
841 b . /* prevent speculative execution */ 876 b . /* prevent speculative execution */
842 #endif /* CONFIG_PPC_ISERIES */ 877 #endif /* CONFIG_PPC_ISERIES */
843 878
844 /*** Common interrupt handlers ***/ 879 /*** Common interrupt handlers ***/
845 880
846 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) 881 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
847 882
848 /* 883 /*
849 * Machine check is different because we use a different 884 * Machine check is different because we use a different
850 * save area: PACA_EXMC instead of PACA_EXGEN. 885 * save area: PACA_EXMC instead of PACA_EXGEN.
851 */ 886 */
852 .align 7 887 .align 7
853 .globl machine_check_common 888 .globl machine_check_common
854 machine_check_common: 889 machine_check_common:
855 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) 890 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
856 FINISH_NAP 891 FINISH_NAP
857 DISABLE_INTS 892 DISABLE_INTS
858 bl .save_nvgprs 893 bl .save_nvgprs
859 addi r3,r1,STACK_FRAME_OVERHEAD 894 addi r3,r1,STACK_FRAME_OVERHEAD
860 bl .machine_check_exception 895 bl .machine_check_exception
861 b .ret_from_except 896 b .ret_from_except
862 897
863 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) 898 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
864 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) 899 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
865 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) 900 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
866 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) 901 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
867 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) 902 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
868 STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) 903 STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
869 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 904 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
870 #ifdef CONFIG_ALTIVEC 905 #ifdef CONFIG_ALTIVEC
871 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 906 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
872 #else 907 #else
873 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) 908 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
874 #endif 909 #endif
875 #ifdef CONFIG_CBE_RAS 910 #ifdef CONFIG_CBE_RAS
876 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) 911 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
877 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) 912 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
878 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) 913 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
879 #endif /* CONFIG_CBE_RAS */ 914 #endif /* CONFIG_CBE_RAS */
880 915
881 /* 916 /*
882 * Here we have detected that the kernel stack pointer is bad. 917 * Here we have detected that the kernel stack pointer is bad.
883 * R9 contains the saved CR, r13 points to the paca, 918 * R9 contains the saved CR, r13 points to the paca,
884 * r10 contains the (bad) kernel stack pointer, 919 * r10 contains the (bad) kernel stack pointer,
885 * r11 and r12 contain the saved SRR0 and SRR1. 920 * r11 and r12 contain the saved SRR0 and SRR1.
886 * We switch to using an emergency stack, save the registers there, 921 * We switch to using an emergency stack, save the registers there,
887 * and call kernel_bad_stack(), which panics. 922 * and call kernel_bad_stack(), which panics.
888 */ 923 */
889 bad_stack: 924 bad_stack:
890 ld r1,PACAEMERGSP(r13) 925 ld r1,PACAEMERGSP(r13)
891 subi r1,r1,64+INT_FRAME_SIZE 926 subi r1,r1,64+INT_FRAME_SIZE
892 std r9,_CCR(r1) 927 std r9,_CCR(r1)
893 std r10,GPR1(r1) 928 std r10,GPR1(r1)
894 std r11,_NIP(r1) 929 std r11,_NIP(r1)
895 std r12,_MSR(r1) 930 std r12,_MSR(r1)
896 mfspr r11,SPRN_DAR 931 mfspr r11,SPRN_DAR
897 mfspr r12,SPRN_DSISR 932 mfspr r12,SPRN_DSISR
898 std r11,_DAR(r1) 933 std r11,_DAR(r1)
899 std r12,_DSISR(r1) 934 std r12,_DSISR(r1)
900 mflr r10 935 mflr r10
901 mfctr r11 936 mfctr r11
902 mfxer r12 937 mfxer r12
903 std r10,_LINK(r1) 938 std r10,_LINK(r1)
904 std r11,_CTR(r1) 939 std r11,_CTR(r1)
905 std r12,_XER(r1) 940 std r12,_XER(r1)
906 SAVE_GPR(0,r1) 941 SAVE_GPR(0,r1)
907 SAVE_GPR(2,r1) 942 SAVE_GPR(2,r1)
908 SAVE_4GPRS(3,r1) 943 SAVE_4GPRS(3,r1)
909 SAVE_2GPRS(7,r1) 944 SAVE_2GPRS(7,r1)
910 SAVE_10GPRS(12,r1) 945 SAVE_10GPRS(12,r1)
911 SAVE_10GPRS(22,r1) 946 SAVE_10GPRS(22,r1)
912 addi r11,r1,INT_FRAME_SIZE 947 addi r11,r1,INT_FRAME_SIZE
913 std r11,0(r1) 948 std r11,0(r1)
914 li r12,0 949 li r12,0
915 std r12,0(r11) 950 std r12,0(r11)
916 ld r2,PACATOC(r13) 951 ld r2,PACATOC(r13)
917 1: addi r3,r1,STACK_FRAME_OVERHEAD 952 1: addi r3,r1,STACK_FRAME_OVERHEAD
918 bl .kernel_bad_stack 953 bl .kernel_bad_stack
919 b 1b 954 b 1b
920 955
921 /* 956 /*
922 * Return from an exception with minimal checks. 957 * Return from an exception with minimal checks.
923 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. 958 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
924 * If interrupts have been enabled, or anything has been 959 * If interrupts have been enabled, or anything has been
925 * done that might have changed the scheduling status of 960 * done that might have changed the scheduling status of
926 * any task or sent any task a signal, you should use 961 * any task or sent any task a signal, you should use
927 * ret_from_except or ret_from_except_lite instead of this. 962 * ret_from_except or ret_from_except_lite instead of this.
928 */ 963 */
929 .globl fast_exception_return 964 .globl fast_exception_return
930 fast_exception_return: 965 fast_exception_return:
931 ld r12,_MSR(r1) 966 ld r12,_MSR(r1)
932 ld r11,_NIP(r1) 967 ld r11,_NIP(r1)
933 andi. r3,r12,MSR_RI /* check if RI is set */ 968 andi. r3,r12,MSR_RI /* check if RI is set */
934 beq- unrecov_fer 969 beq- unrecov_fer
935 970
936 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 971 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
937 andi. r3,r12,MSR_PR 972 andi. r3,r12,MSR_PR
938 beq 2f 973 beq 2f
939 ACCOUNT_CPU_USER_EXIT(r3, r4) 974 ACCOUNT_CPU_USER_EXIT(r3, r4)
940 2: 975 2:
941 #endif 976 #endif
942 977
943 ld r3,_CCR(r1) 978 ld r3,_CCR(r1)
944 ld r4,_LINK(r1) 979 ld r4,_LINK(r1)
945 ld r5,_CTR(r1) 980 ld r5,_CTR(r1)
946 ld r6,_XER(r1) 981 ld r6,_XER(r1)
947 mtcr r3 982 mtcr r3
948 mtlr r4 983 mtlr r4
949 mtctr r5 984 mtctr r5
950 mtxer r6 985 mtxer r6
951 REST_GPR(0, r1) 986 REST_GPR(0, r1)
952 REST_8GPRS(2, r1) 987 REST_8GPRS(2, r1)
953 988
954 mfmsr r10 989 mfmsr r10
955 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ 990 rldicl r10,r10,48,1 /* clear EE */
991 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
956 mtmsrd r10,1 992 mtmsrd r10,1
957 993
958 mtspr SPRN_SRR1,r12 994 mtspr SPRN_SRR1,r12
959 mtspr SPRN_SRR0,r11 995 mtspr SPRN_SRR0,r11
960 REST_4GPRS(10, r1) 996 REST_4GPRS(10, r1)
961 ld r1,GPR1(r1) 997 ld r1,GPR1(r1)
962 rfid 998 rfid
963 b . /* prevent speculative execution */ 999 b . /* prevent speculative execution */
964 1000
965 unrecov_fer: 1001 unrecov_fer:
966 bl .save_nvgprs 1002 bl .save_nvgprs
967 1: addi r3,r1,STACK_FRAME_OVERHEAD 1003 1: addi r3,r1,STACK_FRAME_OVERHEAD
968 bl .unrecoverable_exception 1004 bl .unrecoverable_exception
969 b 1b 1005 b 1b
970 1006
971 /* 1007 /*
972 * Here r13 points to the paca, r9 contains the saved CR, 1008 * Here r13 points to the paca, r9 contains the saved CR,
973 * SRR0 and SRR1 are saved in r11 and r12, 1009 * SRR0 and SRR1 are saved in r11 and r12,
974 * r9 - r13 are saved in paca->exgen. 1010 * r9 - r13 are saved in paca->exgen.
975 */ 1011 */
976 .align 7 1012 .align 7
977 .globl data_access_common 1013 .globl data_access_common
978 data_access_common: 1014 data_access_common:
979 mfspr r10,SPRN_DAR 1015 mfspr r10,SPRN_DAR
980 std r10,PACA_EXGEN+EX_DAR(r13) 1016 std r10,PACA_EXGEN+EX_DAR(r13)
981 mfspr r10,SPRN_DSISR 1017 mfspr r10,SPRN_DSISR
982 stw r10,PACA_EXGEN+EX_DSISR(r13) 1018 stw r10,PACA_EXGEN+EX_DSISR(r13)
983 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) 1019 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
984 ld r3,PACA_EXGEN+EX_DAR(r13) 1020 ld r3,PACA_EXGEN+EX_DAR(r13)
985 lwz r4,PACA_EXGEN+EX_DSISR(r13) 1021 lwz r4,PACA_EXGEN+EX_DSISR(r13)
986 li r5,0x300 1022 li r5,0x300
987 b .do_hash_page /* Try to handle as hpte fault */ 1023 b .do_hash_page /* Try to handle as hpte fault */
988 1024
989 .align 7 1025 .align 7
990 .globl instruction_access_common 1026 .globl instruction_access_common
991 instruction_access_common: 1027 instruction_access_common:
992 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) 1028 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
993 ld r3,_NIP(r1) 1029 ld r3,_NIP(r1)
994 andis. r4,r12,0x5820 1030 andis. r4,r12,0x5820
995 li r5,0x400 1031 li r5,0x400
996 b .do_hash_page /* Try to handle as hpte fault */ 1032 b .do_hash_page /* Try to handle as hpte fault */
997 1033
998 /* 1034 /*
999 * Here is the common SLB miss user handler that is used when going to virtual 1035 * Here is the common SLB miss user handler that is used when going to virtual
1000 * mode for SLB misses; it is currently not used 1036 * mode for SLB misses; it is currently not used
1001 */ 1037 */
1002 #ifdef __DISABLED__ 1038 #ifdef __DISABLED__
1003 .align 7 1039 .align 7
1004 .globl slb_miss_user_common 1040 .globl slb_miss_user_common
1005 slb_miss_user_common: 1041 slb_miss_user_common:
1006 mflr r10 1042 mflr r10
1007 std r3,PACA_EXGEN+EX_DAR(r13) 1043 std r3,PACA_EXGEN+EX_DAR(r13)
1008 stw r9,PACA_EXGEN+EX_CCR(r13) 1044 stw r9,PACA_EXGEN+EX_CCR(r13)
1009 std r10,PACA_EXGEN+EX_LR(r13) 1045 std r10,PACA_EXGEN+EX_LR(r13)
1010 std r11,PACA_EXGEN+EX_SRR0(r13) 1046 std r11,PACA_EXGEN+EX_SRR0(r13)
1011 bl .slb_allocate_user 1047 bl .slb_allocate_user
1012 1048
1013 ld r10,PACA_EXGEN+EX_LR(r13) 1049 ld r10,PACA_EXGEN+EX_LR(r13)
1014 ld r3,PACA_EXGEN+EX_R3(r13) 1050 ld r3,PACA_EXGEN+EX_R3(r13)
1015 lwz r9,PACA_EXGEN+EX_CCR(r13) 1051 lwz r9,PACA_EXGEN+EX_CCR(r13)
1016 ld r11,PACA_EXGEN+EX_SRR0(r13) 1052 ld r11,PACA_EXGEN+EX_SRR0(r13)
1017 mtlr r10 1053 mtlr r10
1018 beq- slb_miss_fault 1054 beq- slb_miss_fault
1019 1055
1020 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1056 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1021 beq- unrecov_user_slb 1057 beq- unrecov_user_slb
1022 mfmsr r10 1058 mfmsr r10
1023 1059
1024 .machine push 1060 .machine push
1025 .machine "power4" 1061 .machine "power4"
1026 mtcrf 0x80,r9 1062 mtcrf 0x80,r9
1027 .machine pop 1063 .machine pop
1028 1064
1029 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ 1065 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
1030 mtmsrd r10,1 1066 mtmsrd r10,1
1031 1067
1032 mtspr SRR0,r11 1068 mtspr SRR0,r11
1033 mtspr SRR1,r12 1069 mtspr SRR1,r12
1034 1070
1035 ld r9,PACA_EXGEN+EX_R9(r13) 1071 ld r9,PACA_EXGEN+EX_R9(r13)
1036 ld r10,PACA_EXGEN+EX_R10(r13) 1072 ld r10,PACA_EXGEN+EX_R10(r13)
1037 ld r11,PACA_EXGEN+EX_R11(r13) 1073 ld r11,PACA_EXGEN+EX_R11(r13)
1038 ld r12,PACA_EXGEN+EX_R12(r13) 1074 ld r12,PACA_EXGEN+EX_R12(r13)
1039 ld r13,PACA_EXGEN+EX_R13(r13) 1075 ld r13,PACA_EXGEN+EX_R13(r13)
1040 rfid 1076 rfid
1041 b . 1077 b .
1042 1078
1043 slb_miss_fault: 1079 slb_miss_fault:
1044 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) 1080 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1045 ld r4,PACA_EXGEN+EX_DAR(r13) 1081 ld r4,PACA_EXGEN+EX_DAR(r13)
1046 li r5,0 1082 li r5,0
1047 std r4,_DAR(r1) 1083 std r4,_DAR(r1)
1048 std r5,_DSISR(r1) 1084 std r5,_DSISR(r1)
1049 b .handle_page_fault 1085 b .handle_page_fault
1050 1086
1051 unrecov_user_slb: 1087 unrecov_user_slb:
1052 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) 1088 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1053 DISABLE_INTS 1089 DISABLE_INTS
1054 bl .save_nvgprs 1090 bl .save_nvgprs
1055 1: addi r3,r1,STACK_FRAME_OVERHEAD 1091 1: addi r3,r1,STACK_FRAME_OVERHEAD
1056 bl .unrecoverable_exception 1092 bl .unrecoverable_exception
1057 b 1b 1093 b 1b
1058 1094
1059 #endif /* __DISABLED__ */ 1095 #endif /* __DISABLED__ */
1060 1096
1061 1097
1062 /* 1098 /*
1063 * r13 points to the PACA, r9 contains the saved CR, 1099 * r13 points to the PACA, r9 contains the saved CR,
1064 * r12 contains the saved SRR1, SRR0 is still ready for return 1100 * r12 contains the saved SRR1, SRR0 is still ready for return
1065 * r3 has the faulting address 1101 * r3 has the faulting address
1066 * r9 - r13 are saved in paca->exslb. 1102 * r9 - r13 are saved in paca->exslb.
1067 * r3 is saved in paca->slb_r3 1103 * r3 is saved in paca->slb_r3
1068 * We assume we aren't going to take any exceptions during this procedure. 1104 * We assume we aren't going to take any exceptions during this procedure.
1069 */ 1105 */
1070 _GLOBAL(slb_miss_realmode) 1106 _GLOBAL(slb_miss_realmode)
1071 mflr r10 1107 mflr r10
1072 1108
1073 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1109 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1074 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 1110 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1075 1111
1076 bl .slb_allocate_realmode 1112 bl .slb_allocate_realmode
1077 1113
1078 /* All done -- return from exception. */ 1114 /* All done -- return from exception. */
1079 1115
1080 ld r10,PACA_EXSLB+EX_LR(r13) 1116 ld r10,PACA_EXSLB+EX_LR(r13)
1081 ld r3,PACA_EXSLB+EX_R3(r13) 1117 ld r3,PACA_EXSLB+EX_R3(r13)
1082 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1118 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1083 #ifdef CONFIG_PPC_ISERIES 1119 #ifdef CONFIG_PPC_ISERIES
1084 BEGIN_FW_FTR_SECTION 1120 BEGIN_FW_FTR_SECTION
1085 ld r11,PACALPPACAPTR(r13) 1121 ld r11,PACALPPACAPTR(r13)
1086 ld r11,LPPACASRR0(r11) /* get SRR0 value */ 1122 ld r11,LPPACASRR0(r11) /* get SRR0 value */
1087 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 1123 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1088 #endif /* CONFIG_PPC_ISERIES */ 1124 #endif /* CONFIG_PPC_ISERIES */
1089 1125
1090 mtlr r10 1126 mtlr r10
1091 1127
1092 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1128 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1093 beq- unrecov_slb 1129 beq- unrecov_slb
1094 1130
1095 .machine push 1131 .machine push
1096 .machine "power4" 1132 .machine "power4"
1097 mtcrf 0x80,r9 1133 mtcrf 0x80,r9
1098 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 1134 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1099 .machine pop 1135 .machine pop
1100 1136
1101 #ifdef CONFIG_PPC_ISERIES 1137 #ifdef CONFIG_PPC_ISERIES
1102 BEGIN_FW_FTR_SECTION 1138 BEGIN_FW_FTR_SECTION
1103 mtspr SPRN_SRR0,r11 1139 mtspr SPRN_SRR0,r11
1104 mtspr SPRN_SRR1,r12 1140 mtspr SPRN_SRR1,r12
1105 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 1141 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1106 #endif /* CONFIG_PPC_ISERIES */ 1142 #endif /* CONFIG_PPC_ISERIES */
1107 ld r9,PACA_EXSLB+EX_R9(r13) 1143 ld r9,PACA_EXSLB+EX_R9(r13)
1108 ld r10,PACA_EXSLB+EX_R10(r13) 1144 ld r10,PACA_EXSLB+EX_R10(r13)
1109 ld r11,PACA_EXSLB+EX_R11(r13) 1145 ld r11,PACA_EXSLB+EX_R11(r13)
1110 ld r12,PACA_EXSLB+EX_R12(r13) 1146 ld r12,PACA_EXSLB+EX_R12(r13)
1111 ld r13,PACA_EXSLB+EX_R13(r13) 1147 ld r13,PACA_EXSLB+EX_R13(r13)
1112 rfid 1148 rfid
1113 b . /* prevent speculative execution */ 1149 b . /* prevent speculative execution */
1114 1150
1115 unrecov_slb: 1151 unrecov_slb:
1116 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) 1152 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1117 DISABLE_INTS 1153 DISABLE_INTS
1118 bl .save_nvgprs 1154 bl .save_nvgprs
1119 1: addi r3,r1,STACK_FRAME_OVERHEAD 1155 1: addi r3,r1,STACK_FRAME_OVERHEAD
1120 bl .unrecoverable_exception 1156 bl .unrecoverable_exception
1121 b 1b 1157 b 1b
1122 1158
1123 .align 7 1159 .align 7
1124 .globl hardware_interrupt_common 1160 .globl hardware_interrupt_common
1125 .globl hardware_interrupt_entry 1161 .globl hardware_interrupt_entry
1126 hardware_interrupt_common: 1162 hardware_interrupt_common:
1127 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) 1163 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
1128 FINISH_NAP 1164 FINISH_NAP
1129 hardware_interrupt_entry: 1165 hardware_interrupt_entry:
1130 DISABLE_INTS 1166 DISABLE_INTS
1131 bl .ppc64_runlatch_on 1167 bl .ppc64_runlatch_on
1132 addi r3,r1,STACK_FRAME_OVERHEAD 1168 addi r3,r1,STACK_FRAME_OVERHEAD
1133 bl .do_IRQ 1169 bl .do_IRQ
1134 b .ret_from_except_lite 1170 b .ret_from_except_lite
1135 1171
1136 #ifdef CONFIG_PPC_970_NAP 1172 #ifdef CONFIG_PPC_970_NAP
1137 power4_fixup_nap: 1173 power4_fixup_nap:
1138 andc r9,r9,r10 1174 andc r9,r9,r10
1139 std r9,TI_LOCAL_FLAGS(r11) 1175 std r9,TI_LOCAL_FLAGS(r11)
1140 ld r10,_LINK(r1) /* make idle task do the */ 1176 ld r10,_LINK(r1) /* make idle task do the */
1141 std r10,_NIP(r1) /* equivalent of a blr */ 1177 std r10,_NIP(r1) /* equivalent of a blr */
1142 blr 1178 blr
1143 #endif 1179 #endif
1144 1180
1145 .align 7 1181 .align 7
1146 .globl alignment_common 1182 .globl alignment_common
1147 alignment_common: 1183 alignment_common:
1148 mfspr r10,SPRN_DAR 1184 mfspr r10,SPRN_DAR
1149 std r10,PACA_EXGEN+EX_DAR(r13) 1185 std r10,PACA_EXGEN+EX_DAR(r13)
1150 mfspr r10,SPRN_DSISR 1186 mfspr r10,SPRN_DSISR
1151 stw r10,PACA_EXGEN+EX_DSISR(r13) 1187 stw r10,PACA_EXGEN+EX_DSISR(r13)
1152 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) 1188 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1153 ld r3,PACA_EXGEN+EX_DAR(r13) 1189 ld r3,PACA_EXGEN+EX_DAR(r13)
1154 lwz r4,PACA_EXGEN+EX_DSISR(r13) 1190 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1155 std r3,_DAR(r1) 1191 std r3,_DAR(r1)
1156 std r4,_DSISR(r1) 1192 std r4,_DSISR(r1)
1157 bl .save_nvgprs 1193 bl .save_nvgprs
1158 addi r3,r1,STACK_FRAME_OVERHEAD 1194 addi r3,r1,STACK_FRAME_OVERHEAD
1159 ENABLE_INTS 1195 ENABLE_INTS
1160 bl .alignment_exception 1196 bl .alignment_exception
1161 b .ret_from_except 1197 b .ret_from_except
1162 1198
1163 .align 7 1199 .align 7
1164 .globl program_check_common 1200 .globl program_check_common
1165 program_check_common: 1201 program_check_common:
1166 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) 1202 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1167 bl .save_nvgprs 1203 bl .save_nvgprs
1168 addi r3,r1,STACK_FRAME_OVERHEAD 1204 addi r3,r1,STACK_FRAME_OVERHEAD
1169 ENABLE_INTS 1205 ENABLE_INTS
1170 bl .program_check_exception 1206 bl .program_check_exception
1171 b .ret_from_except 1207 b .ret_from_except
1172 1208
1173 .align 7 1209 .align 7
1174 .globl fp_unavailable_common 1210 .globl fp_unavailable_common
1175 fp_unavailable_common: 1211 fp_unavailable_common:
1176 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) 1212 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1177 bne .load_up_fpu /* if from user, just load it up */ 1213 bne .load_up_fpu /* if from user, just load it up */
1178 bl .save_nvgprs 1214 bl .save_nvgprs
1179 addi r3,r1,STACK_FRAME_OVERHEAD 1215 addi r3,r1,STACK_FRAME_OVERHEAD
1180 ENABLE_INTS 1216 ENABLE_INTS
1181 bl .kernel_fp_unavailable_exception 1217 bl .kernel_fp_unavailable_exception
1182 BUG_OPCODE 1218 BUG_OPCODE
1183 1219
1184 .align 7 1220 .align 7
1185 .globl altivec_unavailable_common 1221 .globl altivec_unavailable_common
1186 altivec_unavailable_common: 1222 altivec_unavailable_common:
1187 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) 1223 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1188 #ifdef CONFIG_ALTIVEC 1224 #ifdef CONFIG_ALTIVEC
1189 BEGIN_FTR_SECTION 1225 BEGIN_FTR_SECTION
1190 bne .load_up_altivec /* if from user, just load it up */ 1226 bne .load_up_altivec /* if from user, just load it up */
1191 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 1227 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1192 #endif 1228 #endif
1193 bl .save_nvgprs 1229 bl .save_nvgprs
1194 addi r3,r1,STACK_FRAME_OVERHEAD 1230 addi r3,r1,STACK_FRAME_OVERHEAD
1195 ENABLE_INTS 1231 ENABLE_INTS
1196 bl .altivec_unavailable_exception 1232 bl .altivec_unavailable_exception
1197 b .ret_from_except 1233 b .ret_from_except
1198 1234
1199 #ifdef CONFIG_ALTIVEC 1235 #ifdef CONFIG_ALTIVEC
1200 /* 1236 /*
1201 * load_up_altivec(unused, unused, tsk) 1237 * load_up_altivec(unused, unused, tsk)
1202 * Disable VMX for the task which had it previously, 1238 * Disable VMX for the task which had it previously,
1203 * and save its vector registers in its thread_struct. 1239 * and save its vector registers in its thread_struct.
1204 * Enables the VMX for use in the kernel on return. 1240 * Enables the VMX for use in the kernel on return.
1205 * On SMP we know the VMX is free, since we give it up every 1241 * On SMP we know the VMX is free, since we give it up every
1206 * switch (ie, no lazy save of the vector registers). 1242 * switch (ie, no lazy save of the vector registers).
1207 * On entry: r13 == 'current' && last_task_used_altivec != 'current' 1243 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1208 */ 1244 */
1209 _STATIC(load_up_altivec) 1245 _STATIC(load_up_altivec)
1210 mfmsr r5 /* grab the current MSR */ 1246 mfmsr r5 /* grab the current MSR */
1211 oris r5,r5,MSR_VEC@h 1247 oris r5,r5,MSR_VEC@h
1212 mtmsrd r5 /* enable use of VMX now */ 1248 mtmsrd r5 /* enable use of VMX now */
1213 isync 1249 isync
1214 1250
1215 /* 1251 /*
1216 * For SMP, we don't do lazy VMX switching because it just gets too 1252 * For SMP, we don't do lazy VMX switching because it just gets too
1217 * horrendously complex, especially when a task switches from one CPU 1253 * horrendously complex, especially when a task switches from one CPU
1218 * to another. Instead we call giveup_altivec in switch_to. 1254 * to another. Instead we call giveup_altivec in switch_to.
1219 * VRSAVE isn't dealt with here, that is done in the normal context 1255 * VRSAVE isn't dealt with here, that is done in the normal context
1220 * switch code. Note that we could rely on vrsave value to eventually 1256 * switch code. Note that we could rely on vrsave value to eventually
1221 * avoid saving all of the VREGs here... 1257 * avoid saving all of the VREGs here...
1222 */ 1258 */
1223 #ifndef CONFIG_SMP 1259 #ifndef CONFIG_SMP
1224 ld r3,last_task_used_altivec@got(r2) 1260 ld r3,last_task_used_altivec@got(r2)
1225 ld r4,0(r3) 1261 ld r4,0(r3)
1226 cmpdi 0,r4,0 1262 cmpdi 0,r4,0
1227 beq 1f 1263 beq 1f
1228 /* Save VMX state to last_task_used_altivec's THREAD struct */ 1264 /* Save VMX state to last_task_used_altivec's THREAD struct */
1229 addi r4,r4,THREAD 1265 addi r4,r4,THREAD
1230 SAVE_32VRS(0,r5,r4) 1266 SAVE_32VRS(0,r5,r4)
1231 mfvscr vr0 1267 mfvscr vr0
1232 li r10,THREAD_VSCR 1268 li r10,THREAD_VSCR
1233 stvx vr0,r10,r4 1269 stvx vr0,r10,r4
1234 /* Disable VMX for last_task_used_altivec */ 1270 /* Disable VMX for last_task_used_altivec */
1235 ld r5,PT_REGS(r4) 1271 ld r5,PT_REGS(r4)
1236 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1272 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1237 lis r6,MSR_VEC@h 1273 lis r6,MSR_VEC@h
1238 andc r4,r4,r6 1274 andc r4,r4,r6
1239 std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1275 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1240 1: 1276 1:
1241 #endif /* CONFIG_SMP */ 1277 #endif /* CONFIG_SMP */
1242 /* Hack: if we get an altivec unavailable trap with VRSAVE 1278 /* Hack: if we get an altivec unavailable trap with VRSAVE
1243 * set to all zeros, we assume this is a broken application 1279 * set to all zeros, we assume this is a broken application
1244 * that fails to set it properly, and thus we switch it to 1280 * that fails to set it properly, and thus we switch it to
1245 * all 1's 1281 * all 1's
1246 */ 1282 */
1247 mfspr r4,SPRN_VRSAVE 1283 mfspr r4,SPRN_VRSAVE
1248 cmpdi 0,r4,0 1284 cmpdi 0,r4,0
1249 bne+ 1f 1285 bne+ 1f
1250 li r4,-1 1286 li r4,-1
1251 mtspr SPRN_VRSAVE,r4 1287 mtspr SPRN_VRSAVE,r4
1252 1: 1288 1:
1253 /* enable use of VMX after return */ 1289 /* enable use of VMX after return */
1254 ld r4,PACACURRENT(r13) 1290 ld r4,PACACURRENT(r13)
1255 addi r5,r4,THREAD /* Get THREAD */ 1291 addi r5,r4,THREAD /* Get THREAD */
1256 oris r12,r12,MSR_VEC@h 1292 oris r12,r12,MSR_VEC@h
1257 std r12,_MSR(r1) 1293 std r12,_MSR(r1)
1258 li r4,1 1294 li r4,1
1259 li r10,THREAD_VSCR 1295 li r10,THREAD_VSCR
1260 stw r4,THREAD_USED_VR(r5) 1296 stw r4,THREAD_USED_VR(r5)
1261 lvx vr0,r10,r5 1297 lvx vr0,r10,r5
1262 mtvscr vr0 1298 mtvscr vr0
1263 REST_32VRS(0,r4,r5) 1299 REST_32VRS(0,r4,r5)
1264 #ifndef CONFIG_SMP 1300 #ifndef CONFIG_SMP
1265 /* Update last_task_used_altivec to 'current' */ 1301 /* Update last_task_used_altivec to 'current' */
1266 subi r4,r5,THREAD /* Back to 'current' */ 1302 subi r4,r5,THREAD /* Back to 'current' */
1267 std r4,0(r3) 1303 std r4,0(r3)
1268 #endif /* CONFIG_SMP */ 1304 #endif /* CONFIG_SMP */
1269 /* restore registers and return */ 1305 /* restore registers and return */
1270 b fast_exception_return 1306 b fast_exception_return
1271 #endif /* CONFIG_ALTIVEC */ 1307 #endif /* CONFIG_ALTIVEC */
1272 1308
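The load_up_altivec path above implements the non-SMP lazy VMX hand-off described in its comments. As a reading aid, a rough C outline under assumed names (vmx_task, lazy_vmx_load and the fields used are placeholders, not the real thread_struct layout):

#include <stddef.h>

/* Illustrative outline of the !CONFIG_SMP lazy VMX hand-off. */
struct vmx_task {
	unsigned long regs_msr;		/* stand-in for the saved MSR in pt_regs */
};

static struct vmx_task *last_vmx_owner;	/* stand-in for last_task_used_altivec */

static void lazy_vmx_load(struct vmx_task *current_task)
{
	if (last_vmx_owner != NULL && last_vmx_owner != current_task) {
		/* save the previous owner's VRs/VSCR into its thread_struct
		 * (elided) and clear MSR_VEC in its saved MSR so it traps
		 * again the next time it touches a vector register */
		last_vmx_owner->regs_msr &= ~(1UL << 25);	/* MSR_VEC position */
	}
	/* restore current's VSCR/VRs (elided), note that it has used VMX,
	 * and set MSR_VEC in the MSR image on the stack so VMX is enabled
	 * once the exception returns */
	current_task->regs_msr |= 1UL << 25;
	last_vmx_owner = current_task;
}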
1273 /* 1309 /*
1274 * Hash table stuff 1310 * Hash table stuff
1275 */ 1311 */
1276 .align 7 1312 .align 7
1277 _GLOBAL(do_hash_page) 1313 _GLOBAL(do_hash_page)
1278 std r3,_DAR(r1) 1314 std r3,_DAR(r1)
1279 std r4,_DSISR(r1) 1315 std r4,_DSISR(r1)
1280 1316
1281 andis. r0,r4,0xa450 /* weird error? */ 1317 andis. r0,r4,0xa450 /* weird error? */
1282 bne- .handle_page_fault /* if not, try to insert a HPTE */ 1318 bne- .handle_page_fault /* if not, try to insert a HPTE */
1283 BEGIN_FTR_SECTION 1319 BEGIN_FTR_SECTION
1284 andis. r0,r4,0x0020 /* Is it a segment table fault? */ 1320 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1285 bne- .do_ste_alloc /* If so handle it */ 1321 bne- .do_ste_alloc /* If so handle it */
1286 END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 1322 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1287 1323
1288 /* 1324 /*
1289 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are 1325 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1290 * accessing a userspace segment (even from the kernel). We assume 1326 * accessing a userspace segment (even from the kernel). We assume
1291 * kernel addresses always have the high bit set. 1327 * kernel addresses always have the high bit set.
1292 */ 1328 */
1293 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ 1329 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1294 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ 1330 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1295 orc r0,r12,r0 /* MSR_PR | ~high_bit */ 1331 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1296 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ 1332 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1297 ori r4,r4,1 /* add _PAGE_PRESENT */ 1333 ori r4,r4,1 /* add _PAGE_PRESENT */
1298 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ 1334 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
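Put another way, the access-flag word handed to hash_page is built roughly as follows (a C sketch; dar, dsisr, msr and trap stand for the saved register values, and the macro names follow the comments above rather than the exact bit positions the rotates encode):

	unsigned long access = _PAGE_PRESENT;
	if (dsisr & DSISR_STORE)		/* store fault needs write */
		access |= _PAGE_RW;
	if ((msr & MSR_PR) || !(dar >> 63))	/* user mode or user address */
		access |= _PAGE_USER;
	if (trap == 0x400)			/* instruction access fault */
		access |= _PAGE_EXEC;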
1299 1335
1300 /* 1336 /*
1301 * On iSeries, we soft-disable interrupts here, then 1337 * On iSeries, we soft-disable interrupts here, then
1302 * hard-enable interrupts so that the hash_page code can spin on 1338 * hard-enable interrupts so that the hash_page code can spin on
1303 * the hash_table_lock without problems on a shared processor. 1339 * the hash_table_lock without problems on a shared processor.
1304 */ 1340 */
1305 DISABLE_INTS 1341 DISABLE_INTS
1306 1342
1307 /* 1343 /*
1308 * r3 contains the faulting address 1344 * r3 contains the faulting address
1309 * r4 contains the required access permissions 1345 * r4 contains the required access permissions
1310 * r5 contains the trap number 1346 * r5 contains the trap number
1311 * 1347 *
1312 * at return r3 = 0 for success 1348 * at return r3 = 0 for success
1313 */ 1349 */
1314 bl .hash_page /* build HPTE if possible */ 1350 bl .hash_page /* build HPTE if possible */
1315 cmpdi r3,0 /* see if hash_page succeeded */ 1351 cmpdi r3,0 /* see if hash_page succeeded */
1316 1352
1317 #ifdef DO_SOFT_DISABLE 1353 #ifdef DO_SOFT_DISABLE
1318 BEGIN_FW_FTR_SECTION 1354 BEGIN_FW_FTR_SECTION
1319 /* 1355 /*
1320 * If we had interrupts soft-enabled at the point where the 1356 * If we had interrupts soft-enabled at the point where the
1321 * DSI/ISI occurred, and an interrupt came in during hash_page, 1357 * DSI/ISI occurred, and an interrupt came in during hash_page,
1322 * handle it now. 1358 * handle it now.
1323 * We jump to ret_from_except_lite rather than fast_exception_return 1359 * We jump to ret_from_except_lite rather than fast_exception_return
1324 * because ret_from_except_lite will check for and handle pending 1360 * because ret_from_except_lite will check for and handle pending
1325 * interrupts if necessary. 1361 * interrupts if necessary.
1326 */ 1362 */
1327 beq .ret_from_except_lite 1363 beq .ret_from_except_lite
1328 /* For a hash failure, we don't bother re-enabling interrupts */ 1364 /* For a hash failure, we don't bother re-enabling interrupts */
1329 ble- 12f 1365 ble- 12f
1330 1366
1331 /* 1367 /*
1332 * hash_page couldn't handle it, set soft interrupt enable back 1368 * hash_page couldn't handle it, set soft interrupt enable back
1333 * to what it was before the trap. Note that .local_irq_restore 1369 * to what it was before the trap. Note that .local_irq_restore
1334 * handles any interrupts pending at this point. 1370 * handles any interrupts pending at this point.
1335 */ 1371 */
1336 ld r3,SOFTE(r1) 1372 ld r3,SOFTE(r1)
1337 bl .local_irq_restore 1373 bl .local_irq_restore
1338 b 11f 1374 b 11f
1339 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 1375 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1340 #endif 1376 #endif
1341 BEGIN_FW_FTR_SECTION 1377 BEGIN_FW_FTR_SECTION
1342 beq fast_exception_return /* Return from exception on success */ 1378 beq fast_exception_return /* Return from exception on success */
1343 ble- 12f /* Failure return from hash_page */ 1379 ble- 12f /* Failure return from hash_page */
1344 1380
1345 /* fall through */ 1381 /* fall through */
1346 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) 1382 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1347 1383
1348 /* Here we have a page fault that hash_page can't handle. */ 1384 /* Here we have a page fault that hash_page can't handle. */
1349 _GLOBAL(handle_page_fault) 1385 _GLOBAL(handle_page_fault)
1350 ENABLE_INTS 1386 ENABLE_INTS
1351 11: ld r4,_DAR(r1) 1387 11: ld r4,_DAR(r1)
1352 ld r5,_DSISR(r1) 1388 ld r5,_DSISR(r1)
1353 addi r3,r1,STACK_FRAME_OVERHEAD 1389 addi r3,r1,STACK_FRAME_OVERHEAD
1354 bl .do_page_fault 1390 bl .do_page_fault
1355 cmpdi r3,0 1391 cmpdi r3,0
1356 beq+ .ret_from_except_lite 1392 beq+ .ret_from_except_lite
1357 bl .save_nvgprs 1393 bl .save_nvgprs
1358 mr r5,r3 1394 mr r5,r3
1359 addi r3,r1,STACK_FRAME_OVERHEAD 1395 addi r3,r1,STACK_FRAME_OVERHEAD
1360 lwz r4,_DAR(r1) 1396 lwz r4,_DAR(r1)
1361 bl .bad_page_fault 1397 bl .bad_page_fault
1362 b .ret_from_except 1398 b .ret_from_except
1363 1399
1364 /* We have a page fault that hash_page could handle but HV refused 1400 /* We have a page fault that hash_page could handle but HV refused
1365 * the PTE insertion 1401 * the PTE insertion
1366 */ 1402 */
1367 12: bl .save_nvgprs 1403 12: bl .save_nvgprs
1368 addi r3,r1,STACK_FRAME_OVERHEAD 1404 addi r3,r1,STACK_FRAME_OVERHEAD
1369 lwz r4,_DAR(r1) 1405 lwz r4,_DAR(r1)
1370 bl .low_hash_fault 1406 bl .low_hash_fault
1371 b .ret_from_except 1407 b .ret_from_except
1372 1408
1373 /* here we have a segment miss */ 1409 /* here we have a segment miss */
1374 _GLOBAL(do_ste_alloc) 1410 _GLOBAL(do_ste_alloc)
1375 bl .ste_allocate /* try to insert stab entry */ 1411 bl .ste_allocate /* try to insert stab entry */
1376 cmpdi r3,0 1412 cmpdi r3,0
1377 beq+ fast_exception_return 1413 beq+ fast_exception_return
1378 b .handle_page_fault 1414 b .handle_page_fault
1379 1415
1380 /* 1416 /*
1381 * r13 points to the PACA, r9 contains the saved CR, 1417 * r13 points to the PACA, r9 contains the saved CR,
1382 * r11 and r12 contain the saved SRR0 and SRR1. 1418 * r11 and r12 contain the saved SRR0 and SRR1.
1383 * r9 - r13 are saved in paca->exslb. 1419 * r9 - r13 are saved in paca->exslb.
1384 * We assume we aren't going to take any exceptions during this procedure. 1420 * We assume we aren't going to take any exceptions during this procedure.
1385 * We assume (DAR >> 60) == 0xc. 1421 * We assume (DAR >> 60) == 0xc.
1386 */ 1422 */
1387 .align 7 1423 .align 7
1388 _GLOBAL(do_stab_bolted) 1424 _GLOBAL(do_stab_bolted)
1389 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1425 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1390 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1426 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1391 1427
1392 /* Hash to the primary group */ 1428 /* Hash to the primary group */
1393 ld r10,PACASTABVIRT(r13) 1429 ld r10,PACASTABVIRT(r13)
1394 mfspr r11,SPRN_DAR 1430 mfspr r11,SPRN_DAR
1395 srdi r11,r11,28 1431 srdi r11,r11,28
1396 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1432 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1397 1433
1398 /* Calculate VSID */ 1434 /* Calculate VSID */
1399 /* This is a kernel address, so protovsid = ESID */ 1435 /* This is a kernel address, so protovsid = ESID */
1400 ASM_VSID_SCRAMBLE(r11, r9) 1436 ASM_VSID_SCRAMBLE(r11, r9)
1401 rldic r9,r11,12,16 /* r9 = vsid << 12 */ 1437 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1402 1438
1403 /* Search the primary group for a free entry */ 1439 /* Search the primary group for a free entry */
1404 1: ld r11,0(r10) /* Test valid bit of the current ste */ 1440 1: ld r11,0(r10) /* Test valid bit of the current ste */
1405 andi. r11,r11,0x80 1441 andi. r11,r11,0x80
1406 beq 2f 1442 beq 2f
1407 addi r10,r10,16 1443 addi r10,r10,16
1408 andi. r11,r10,0x70 1444 andi. r11,r10,0x70
1409 bne 1b 1445 bne 1b
1410 1446
1411 /* Stick to only searching the primary group for now. */ 1447 /* Stick to only searching the primary group for now. */
1412 /* At least for now, we use a very simple random castout scheme */ 1448 /* At least for now, we use a very simple random castout scheme */
1413 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ 1449 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1414 mftb r11 1450 mftb r11
1415 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ 1451 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1416 ori r11,r11,0x10 1452 ori r11,r11,0x10
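The castout entry chosen above is, in effect (a C sketch; mftb() stands for the timebase read and group_base for the first STE of the group, i.e. r10 minus 128):

	/* pseudo-random 16-byte-aligned offset 0x10..0x70 within the
	 * 128-byte group; OR with 0x10 so entry 0 is never chosen */
	unsigned long castout = ((mftb() << 4) & 0x70) | 0x10;
	unsigned long victim  = group_base + castout;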
1417 1453
1418 /* r10 currently points to an ste one past the group of interest */ 1454 /* r10 currently points to an ste one past the group of interest */
1419 /* make it point to the randomly selected entry */ 1455 /* make it point to the randomly selected entry */
1420 subi r10,r10,128 1456 subi r10,r10,128
1421 or r10,r10,r11 /* r10 is the entry to invalidate */ 1457 or r10,r10,r11 /* r10 is the entry to invalidate */
1422 1458
1423 isync /* mark the entry invalid */ 1459 isync /* mark the entry invalid */
1424 ld r11,0(r10) 1460 ld r11,0(r10)
1425 rldicl r11,r11,56,1 /* clear the valid bit */ 1461 rldicl r11,r11,56,1 /* clear the valid bit */
1426 rotldi r11,r11,8 1462 rotldi r11,r11,8
1427 std r11,0(r10) 1463 std r11,0(r10)
1428 sync 1464 sync
1429 1465
1430 clrrdi r11,r11,28 /* Get the esid part of the ste */ 1466 clrrdi r11,r11,28 /* Get the esid part of the ste */
1431 slbie r11 1467 slbie r11
1432 1468
1433 2: std r9,8(r10) /* Store the vsid part of the ste */ 1469 2: std r9,8(r10) /* Store the vsid part of the ste */
1434 eieio 1470 eieio
1435 1471
1436 mfspr r11,SPRN_DAR /* Get the new esid */ 1472 mfspr r11,SPRN_DAR /* Get the new esid */
1437 clrrdi r11,r11,28 /* Permits a full 32b of ESID */ 1473 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1438 ori r11,r11,0x90 /* Turn on valid and kp */ 1474 ori r11,r11,0x90 /* Turn on valid and kp */
1439 std r11,0(r10) /* Put new entry back into the stab */ 1475 std r11,0(r10) /* Put new entry back into the stab */
1440 1476
1441 sync 1477 sync
1442 1478
1443 /* All done -- return from exception. */ 1479 /* All done -- return from exception. */
1444 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1480 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1445 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ 1481 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1446 1482
1447 andi. r10,r12,MSR_RI 1483 andi. r10,r12,MSR_RI
1448 beq- unrecov_slb 1484 beq- unrecov_slb
1449 1485
1450 mtcrf 0x80,r9 /* restore CR */ 1486 mtcrf 0x80,r9 /* restore CR */
1451 1487
1452 mfmsr r10 1488 mfmsr r10
1453 clrrdi r10,r10,2 1489 clrrdi r10,r10,2
1454 mtmsrd r10,1 1490 mtmsrd r10,1
1455 1491
1456 mtspr SPRN_SRR0,r11 1492 mtspr SPRN_SRR0,r11
1457 mtspr SPRN_SRR1,r12 1493 mtspr SPRN_SRR1,r12
1458 ld r9,PACA_EXSLB+EX_R9(r13) 1494 ld r9,PACA_EXSLB+EX_R9(r13)
1459 ld r10,PACA_EXSLB+EX_R10(r13) 1495 ld r10,PACA_EXSLB+EX_R10(r13)
1460 ld r11,PACA_EXSLB+EX_R11(r13) 1496 ld r11,PACA_EXSLB+EX_R11(r13)
1461 ld r12,PACA_EXSLB+EX_R12(r13) 1497 ld r12,PACA_EXSLB+EX_R12(r13)
1462 ld r13,PACA_EXSLB+EX_R13(r13) 1498 ld r13,PACA_EXSLB+EX_R13(r13)
1463 rfid 1499 rfid
1464 b . /* prevent speculative execution */ 1500 b . /* prevent speculative execution */
1465 1501
1466 /* 1502 /*
1467 * Space for CPU0's segment table. 1503 * Space for CPU0's segment table.
1468 * 1504 *
1469 * On iSeries, the hypervisor must fill in at least one entry before 1505 * On iSeries, the hypervisor must fill in at least one entry before
1470 * we get control (with relocate on). The address is given to the hv 1506 * we get control (with relocate on). The address is given to the hv
1471 * as a page number (see xLparMap in lpardata.c), so this must be at a 1507 * as a page number (see xLparMap in lpardata.c), so this must be at a
1472 * fixed address (the linker can't compute (u64)&initial_stab >> 1508 * fixed address (the linker can't compute (u64)&initial_stab >>
1473 * PAGE_SHIFT). 1509 * PAGE_SHIFT).
1474 */ 1510 */
1475 . = STAB0_OFFSET /* 0x6000 */ 1511 . = STAB0_OFFSET /* 0x6000 */
1476 .globl initial_stab 1512 .globl initial_stab
1477 initial_stab: 1513 initial_stab:
1478 .space 4096 1514 .space 4096
1479 1515
1480 /* 1516 /*
1481 * Data area reserved for FWNMI option. 1517 * Data area reserved for FWNMI option.
1482 * This address (0x7000) is fixed by the RPA. 1518 * This address (0x7000) is fixed by the RPA.
1483 */ 1519 */
1484 . = 0x7000 1520 . = 0x7000
1485 .globl fwnmi_data_area 1521 .globl fwnmi_data_area
1486 fwnmi_data_area: 1522 fwnmi_data_area:
1487 1523
1488 /* iSeries does not use the FWNMI stuff, so it is safe to put 1524 /* iSeries does not use the FWNMI stuff, so it is safe to put
1489 * this here, even if we later allow kernels that will boot on 1525 * this here, even if we later allow kernels that will boot on
1490 * both pSeries and iSeries */ 1526 * both pSeries and iSeries */
1491 #ifdef CONFIG_PPC_ISERIES 1527 #ifdef CONFIG_PPC_ISERIES
1492 . = LPARMAP_PHYS 1528 . = LPARMAP_PHYS
1493 #include "lparmap.s" 1529 #include "lparmap.s"
1494 /* 1530 /*
1495 * This ".text" is here for old compilers that generate a trailing 1531 * This ".text" is here for old compilers that generate a trailing
1496 * .note section when compiling .c files to .s 1532 * .note section when compiling .c files to .s
1497 */ 1533 */
1498 .text 1534 .text
1499 #endif /* CONFIG_PPC_ISERIES */ 1535 #endif /* CONFIG_PPC_ISERIES */
1500 1536
1501 . = 0x8000 1537 . = 0x8000
1502 1538
1503 /* 1539 /*
1504 * On pSeries and most other platforms, secondary processors spin 1540 * On pSeries and most other platforms, secondary processors spin
1505 * in the following code. 1541 * in the following code.
1506 * At entry, r3 = this processor's number (physical cpu id) 1542 * At entry, r3 = this processor's number (physical cpu id)
1507 */ 1543 */
1508 _GLOBAL(generic_secondary_smp_init) 1544 _GLOBAL(generic_secondary_smp_init)
1509 mr r24,r3 1545 mr r24,r3
1510 1546
1511 /* turn on 64-bit mode */ 1547 /* turn on 64-bit mode */
1512 bl .enable_64b_mode 1548 bl .enable_64b_mode
1513 isync 1549 isync
1514 1550
1515 /* Set up a paca value for this processor. Since we have the 1551 /* Set up a paca value for this processor. Since we have the
1516 * physical cpu id in r24, we need to search the pacas to find 1552 * physical cpu id in r24, we need to search the pacas to find
1517 * which logical id maps to our physical one. 1553 * which logical id maps to our physical one.
1518 */ 1554 */
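The loop below is the assembly form of a simple linear search (a C sketch; phys_id stands for the value in r24 and hw_cpu_id for the PACAHWCPUID field):

	int cpu;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (paca[cpu].hw_cpu_id == phys_id)
			break;
	/* on a hit, r13 = &paca[cpu] and r24 becomes the logical cpu;
	 * on a miss the code falls through to .kexec_wait below */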
1519 LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */ 1555 LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */
1520 li r5,0 /* logical cpu id */ 1556 li r5,0 /* logical cpu id */
1521 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ 1557 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1522 cmpw r6,r24 /* Compare to our id */ 1558 cmpw r6,r24 /* Compare to our id */
1523 beq 2f 1559 beq 2f
1524 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ 1560 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1525 addi r5,r5,1 1561 addi r5,r5,1
1526 cmpwi r5,NR_CPUS 1562 cmpwi r5,NR_CPUS
1527 blt 1b 1563 blt 1b
1528 1564
1529 mr r3,r24 /* not found, copy phys to r3 */ 1565 mr r3,r24 /* not found, copy phys to r3 */
1530 b .kexec_wait /* next kernel might do better */ 1566 b .kexec_wait /* next kernel might do better */
1531 1567
1532 2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1568 2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1533 /* From now on, r24 is expected to be logical cpuid */ 1569 /* From now on, r24 is expected to be logical cpuid */
1534 mr r24,r5 1570 mr r24,r5
1535 3: HMT_LOW 1571 3: HMT_LOW
1536 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ 1572 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1537 /* start. */ 1573 /* start. */
1538 sync 1574 sync
1539 1575
1540 #ifndef CONFIG_SMP 1576 #ifndef CONFIG_SMP
1541 b 3b /* Never go on non-SMP */ 1577 b 3b /* Never go on non-SMP */
1542 #else 1578 #else
1543 cmpwi 0,r23,0 1579 cmpwi 0,r23,0
1544 beq 3b /* Loop until told to go */ 1580 beq 3b /* Loop until told to go */
1545 1581
1546 /* See if we need to call a cpu state restore handler */ 1582 /* See if we need to call a cpu state restore handler */
1547 LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) 1583 LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
1548 ld r23,0(r23) 1584 ld r23,0(r23)
1549 ld r23,CPU_SPEC_RESTORE(r23) 1585 ld r23,CPU_SPEC_RESTORE(r23)
1550 cmpdi 0,r23,0 1586 cmpdi 0,r23,0
1551 beq 4f 1587 beq 4f
1552 ld r23,0(r23) 1588 ld r23,0(r23)
1553 mtctr r23 1589 mtctr r23
1554 bctrl 1590 bctrl
1555 1591
1556 4: /* Create a temp kernel stack for use before relocation is on. */ 1592 4: /* Create a temp kernel stack for use before relocation is on. */
1557 ld r1,PACAEMERGSP(r13) 1593 ld r1,PACAEMERGSP(r13)
1558 subi r1,r1,STACK_FRAME_OVERHEAD 1594 subi r1,r1,STACK_FRAME_OVERHEAD
1559 1595
1560 b .__secondary_start 1596 b .__secondary_start
1561 #endif 1597 #endif
1562 1598
1563 #ifdef CONFIG_PPC_ISERIES 1599 #ifdef CONFIG_PPC_ISERIES
1564 _STATIC(__start_initialization_iSeries) 1600 _STATIC(__start_initialization_iSeries)
1565 /* Clear out the BSS */ 1601 /* Clear out the BSS */
1566 LOAD_REG_IMMEDIATE(r11,__bss_stop) 1602 LOAD_REG_IMMEDIATE(r11,__bss_stop)
1567 LOAD_REG_IMMEDIATE(r8,__bss_start) 1603 LOAD_REG_IMMEDIATE(r8,__bss_start)
1568 sub r11,r11,r8 /* bss size */ 1604 sub r11,r11,r8 /* bss size */
1569 addi r11,r11,7 /* round up to an even double word */ 1605 addi r11,r11,7 /* round up to an even double word */
1570 rldicl. r11,r11,61,3 /* shift right by 3 */ 1606 rldicl. r11,r11,61,3 /* shift right by 3 */
1571 beq 4f 1607 beq 4f
1572 addi r8,r8,-8 1608 addi r8,r8,-8
1573 li r0,0 1609 li r0,0
1574 mtctr r11 /* zero this many doublewords */ 1610 mtctr r11 /* zero this many doublewords */
1575 3: stdu r0,8(r8) 1611 3: stdu r0,8(r8)
1576 bdnz 3b 1612 bdnz 3b
1577 4: 1613 4:
1578 LOAD_REG_IMMEDIATE(r1,init_thread_union) 1614 LOAD_REG_IMMEDIATE(r1,init_thread_union)
1579 addi r1,r1,THREAD_SIZE 1615 addi r1,r1,THREAD_SIZE
1580 li r0,0 1616 li r0,0
1581 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1617 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1582 1618
1583 LOAD_REG_IMMEDIATE(r3,cpu_specs) 1619 LOAD_REG_IMMEDIATE(r3,cpu_specs)
1584 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) 1620 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
1585 li r5,0 1621 li r5,0
1586 bl .identify_cpu 1622 bl .identify_cpu
1587 1623
1588 LOAD_REG_IMMEDIATE(r2,__toc_start) 1624 LOAD_REG_IMMEDIATE(r2,__toc_start)
1589 addi r2,r2,0x4000 1625 addi r2,r2,0x4000
1590 addi r2,r2,0x4000 1626 addi r2,r2,0x4000
1591 1627
1592 bl .iSeries_early_setup 1628 bl .iSeries_early_setup
1593 bl .early_setup 1629 bl .early_setup
1594 1630
1595 /* relocation is on at this point */ 1631 /* relocation is on at this point */
1596 1632
1597 b .start_here_common 1633 b .start_here_common
1598 #endif /* CONFIG_PPC_ISERIES */ 1634 #endif /* CONFIG_PPC_ISERIES */
1599 1635
1600 #ifdef CONFIG_PPC_MULTIPLATFORM 1636 #ifdef CONFIG_PPC_MULTIPLATFORM
1601 1637
1602 _STATIC(__mmu_off) 1638 _STATIC(__mmu_off)
1603 mfmsr r3 1639 mfmsr r3
1604 andi. r0,r3,MSR_IR|MSR_DR 1640 andi. r0,r3,MSR_IR|MSR_DR
1605 beqlr 1641 beqlr
1606 andc r3,r3,r0 1642 andc r3,r3,r0
1607 mtspr SPRN_SRR0,r4 1643 mtspr SPRN_SRR0,r4
1608 mtspr SPRN_SRR1,r3 1644 mtspr SPRN_SRR1,r3
1609 sync 1645 sync
1610 rfid 1646 rfid
1611 b . /* prevent speculative execution */ 1647 b . /* prevent speculative execution */
1612 1648
1613 1649
1614 /* 1650 /*
1615 * Here is our main kernel entry point. We currently support 2 kinds of entries 1651 * Here is our main kernel entry point. We currently support 2 kinds of entries
1616 * depending on the value of r5. 1652 * depending on the value of r5.
1617 * 1653 *
1618 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content 1654 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1619 * in r3...r7 1655 * in r3...r7
1620 * 1656 *
1621 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the 1657 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1622 * DT block, r4 is a physical pointer to the kernel itself 1658 * DT block, r4 is a physical pointer to the kernel itself
1623 * 1659 *
1624 */ 1660 */
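In C terms, the dispatch this comment describes is just (a sketch; the function names stand in for the asm labels .__boot_from_prom and .__after_prom_start):

	if (r5 != 0) {
		/* Open Firmware entry: client-interface args still in r3..r7 */
		boot_from_prom(r3, r4, r5, r6, r7);
	} else {
		/* kexec-style entry: r3 = phys ptr to the device-tree block,
		 * r4 = phys ptr to the kernel image itself */
		after_prom_start(r3, r4);
	}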
1625 _GLOBAL(__start_initialization_multiplatform) 1661 _GLOBAL(__start_initialization_multiplatform)
1626 #ifdef CONFIG_PPC_MULTIPLATFORM 1662 #ifdef CONFIG_PPC_MULTIPLATFORM
1627 /* 1663 /*
1628 * Are we booted from a PROM OF-type client-interface? 1664 * Are we booted from a PROM OF-type client-interface?
1629 */ 1665 */
1630 cmpldi cr0,r5,0 1666 cmpldi cr0,r5,0
1631 bne .__boot_from_prom /* yes -> prom */ 1667 bne .__boot_from_prom /* yes -> prom */
1632 #endif 1668 #endif
1633 1669
1634 /* Save parameters */ 1670 /* Save parameters */
1635 mr r31,r3 1671 mr r31,r3
1636 mr r30,r4 1672 mr r30,r4
1637 1673
1638 /* Make sure we are running in 64 bits mode */ 1674 /* Make sure we are running in 64 bits mode */
1639 bl .enable_64b_mode 1675 bl .enable_64b_mode
1640 1676
1641 /* Setup some critical 970 SPRs before switching MMU off */ 1677 /* Setup some critical 970 SPRs before switching MMU off */
1642 mfspr r0,SPRN_PVR 1678 mfspr r0,SPRN_PVR
1643 srwi r0,r0,16 1679 srwi r0,r0,16
1644 cmpwi r0,0x39 /* 970 */ 1680 cmpwi r0,0x39 /* 970 */
1645 beq 1f 1681 beq 1f
1646 cmpwi r0,0x3c /* 970FX */ 1682 cmpwi r0,0x3c /* 970FX */
1647 beq 1f 1683 beq 1f
1648 cmpwi r0,0x44 /* 970MP */ 1684 cmpwi r0,0x44 /* 970MP */
1649 bne 2f 1685 bne 2f
1650 1: bl .__cpu_preinit_ppc970 1686 1: bl .__cpu_preinit_ppc970
1651 2: 1687 2:
1652 1688
1653 /* Switch off MMU if not already */ 1689 /* Switch off MMU if not already */
1654 LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE) 1690 LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
1655 add r4,r4,r30 1691 add r4,r4,r30
1656 bl .__mmu_off 1692 bl .__mmu_off
1657 b .__after_prom_start 1693 b .__after_prom_start
1658 1694
1659 #ifdef CONFIG_PPC_MULTIPLATFORM 1695 #ifdef CONFIG_PPC_MULTIPLATFORM
1660 _STATIC(__boot_from_prom) 1696 _STATIC(__boot_from_prom)
1661 /* Save parameters */ 1697 /* Save parameters */
1662 mr r31,r3 1698 mr r31,r3
1663 mr r30,r4 1699 mr r30,r4
1664 mr r29,r5 1700 mr r29,r5
1665 mr r28,r6 1701 mr r28,r6
1666 mr r27,r7 1702 mr r27,r7
1667 1703
1668 /* 1704 /*
1669 * Align the stack to 16-byte boundary 1705 * Align the stack to 16-byte boundary
1670 * Depending on the size and layout of the ELF sections in the initial 1706 * Depending on the size and layout of the ELF sections in the initial
1671 * boot binary, the stack pointer will be unaligned on PowerMac 1707 * boot binary, the stack pointer will be unaligned on PowerMac
1672 */ 1708 */
1673 rldicr r1,r1,0,59 1709 rldicr r1,r1,0,59
1674 1710
1675 /* Make sure we are running in 64 bits mode */ 1711 /* Make sure we are running in 64 bits mode */
1676 bl .enable_64b_mode 1712 bl .enable_64b_mode
1677 1713
1678 /* put a relocation offset into r3 */ 1714 /* put a relocation offset into r3 */
1679 bl .reloc_offset 1715 bl .reloc_offset
1680 1716
1681 LOAD_REG_IMMEDIATE(r2,__toc_start) 1717 LOAD_REG_IMMEDIATE(r2,__toc_start)
1682 addi r2,r2,0x4000 1718 addi r2,r2,0x4000
1683 addi r2,r2,0x4000 1719 addi r2,r2,0x4000
1684 1720
1685 /* Relocate the TOC from a virt addr to a real addr */ 1721 /* Relocate the TOC from a virt addr to a real addr */
1686 add r2,r2,r3 1722 add r2,r2,r3
1687 1723
1688 /* Restore parameters */ 1724 /* Restore parameters */
1689 mr r3,r31 1725 mr r3,r31
1690 mr r4,r30 1726 mr r4,r30
1691 mr r5,r29 1727 mr r5,r29
1692 mr r6,r28 1728 mr r6,r28
1693 mr r7,r27 1729 mr r7,r27
1694 1730
1695 /* Do all of the interaction with OF client interface */ 1731 /* Do all of the interaction with OF client interface */
1696 bl .prom_init 1732 bl .prom_init
1697 /* We never return */ 1733 /* We never return */
1698 trap 1734 trap
1699 #endif 1735 #endif
1700 1736
1701 /* 1737 /*
1702 * At this point, r3 contains the physical address we are running at, 1738 * At this point, r3 contains the physical address we are running at,
1703 * returned by prom_init() 1739 * returned by prom_init()
1704 */ 1740 */
1705 _STATIC(__after_prom_start) 1741 _STATIC(__after_prom_start)
1706 1742
1707 /* 1743 /*
1708 * We need to run with __start at physical address PHYSICAL_START. 1744 * We need to run with __start at physical address PHYSICAL_START.
1709 * This will leave some code in the first 256B of 1745 * This will leave some code in the first 256B of
1710 * real memory, which are reserved for software use. 1746 * real memory, which are reserved for software use.
1711 * The remainder of the first page is loaded with the fixed 1747 * The remainder of the first page is loaded with the fixed
1712 * interrupt vectors. The next two pages are filled with 1748 * interrupt vectors. The next two pages are filled with
1713 * unknown exception placeholders. 1749 * unknown exception placeholders.
1714 * 1750 *
1715 * Note: This process overwrites the OF exception vectors. 1751 * Note: This process overwrites the OF exception vectors.
1716 * r26 == relocation offset 1752 * r26 == relocation offset
1717 * r27 == KERNELBASE 1753 * r27 == KERNELBASE
1718 */ 1754 */
1719 bl .reloc_offset 1755 bl .reloc_offset
1720 mr r26,r3 1756 mr r26,r3
1721 LOAD_REG_IMMEDIATE(r27, KERNELBASE) 1757 LOAD_REG_IMMEDIATE(r27, KERNELBASE)
1722 1758
1723 LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */ 1759 LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */
1724 1760
1725 // XXX FIXME: Use phys returned by OF (r30) 1761 // XXX FIXME: Use phys returned by OF (r30)
1726 add r4,r27,r26 /* source addr */ 1762 add r4,r27,r26 /* source addr */
1727 /* current address of _start */ 1763 /* current address of _start */
1728 /* i.e. where we are running */ 1764 /* i.e. where we are running */
1729 /* the source addr */ 1765 /* the source addr */
1730 1766
1731 cmpdi r4,0 /* In some cases the loader may */ 1767 cmpdi r4,0 /* In some cases the loader may */
1732 beq .start_here_multiplatform /* have already put us at zero */ 1768 beq .start_here_multiplatform /* have already put us at zero */
1733 /* so we can skip the copy. */ 1769 /* so we can skip the copy. */
1734 LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */ 1770 LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
1735 sub r5,r5,r27 1771 sub r5,r5,r27
1736 1772
1737 li r6,0x100 /* Start offset, the first 0x100 */ 1773 li r6,0x100 /* Start offset, the first 0x100 */
1738 /* bytes were copied earlier. */ 1774 /* bytes were copied earlier. */
1739 1775
1740 bl .copy_and_flush /* copy the first n bytes */ 1776 bl .copy_and_flush /* copy the first n bytes */
1741 /* this includes the code being */ 1777 /* this includes the code being */
1742 /* executed here. */ 1778 /* executed here. */
1743 1779
1744 LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */ 1780 LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */
1745 mtctr r0 /* that we just made/relocated */ 1781 mtctr r0 /* that we just made/relocated */
1746 bctr 1782 bctr
1747 1783
1748 4: LOAD_REG_IMMEDIATE(r5,klimit) 1784 4: LOAD_REG_IMMEDIATE(r5,klimit)
1749 add r5,r5,r26 1785 add r5,r5,r26
1750 ld r5,0(r5) /* get the value of klimit */ 1786 ld r5,0(r5) /* get the value of klimit */
1751 sub r5,r5,r27 1787 sub r5,r5,r27
1752 bl .copy_and_flush /* copy the rest */ 1788 bl .copy_and_flush /* copy the rest */
1753 b .start_here_multiplatform 1789 b .start_here_multiplatform
1754 1790
1755 #endif /* CONFIG_PPC_MULTIPLATFORM */ 1791 #endif /* CONFIG_PPC_MULTIPLATFORM */
1756 1792
1757 /* 1793 /*
1758 * Copy routine used to copy the kernel to start at physical address 0 1794 * Copy routine used to copy the kernel to start at physical address 0
1759 * and flush and invalidate the caches as needed. 1795 * and flush and invalidate the caches as needed.
1760 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset 1796 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1761 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. 1797 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1762 * 1798 *
1763 * Note: this routine *only* clobbers r0, r6 and lr 1799 * Note: this routine *only* clobbers r0, r6 and lr
1764 */ 1800 */
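A C sketch of what follows (off, limit, dst and src stand for r6, r5, r3 and r4; the dcbst/sync/icbi cache maintenance appears only as a comment, since it is the reason this lives in assembly):

	while (off < limit) {
		int i;
		for (i = 0; i < 8; i++, off += 8)	/* one 8-doubleword "line" */
			*(u64 *)(dst + off) = *(u64 *)(src + off);
		/* dcbst dst+off; sync; icbi dst+off:
		 * push the line to memory and drop any stale icache copy */
	}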
1765 _GLOBAL(copy_and_flush) 1801 _GLOBAL(copy_and_flush)
1766 addi r5,r5,-8 1802 addi r5,r5,-8
1767 addi r6,r6,-8 1803 addi r6,r6,-8
1768 4: li r0,8 /* Use the smallest common */ 1804 4: li r0,8 /* Use the smallest common */
1769 /* denominator cache line */ 1805 /* denominator cache line */
1770 /* size. This results in */ 1806 /* size. This results in */
1771 /* extra cache line flushes */ 1807 /* extra cache line flushes */
1772 /* but operation is correct. */ 1808 /* but operation is correct. */
1773 /* Can't get cache line size */ 1809 /* Can't get cache line size */
1774 /* from NACA as it is being */ 1810 /* from NACA as it is being */
1775 /* moved too. */ 1811 /* moved too. */
1776 1812
1777 mtctr r0 /* put # words/line in ctr */ 1813 mtctr r0 /* put # words/line in ctr */
1778 3: addi r6,r6,8 /* copy a cache line */ 1814 3: addi r6,r6,8 /* copy a cache line */
1779 ldx r0,r6,r4 1815 ldx r0,r6,r4
1780 stdx r0,r6,r3 1816 stdx r0,r6,r3
1781 bdnz 3b 1817 bdnz 3b
1782 dcbst r6,r3 /* write it to memory */ 1818 dcbst r6,r3 /* write it to memory */
1783 sync 1819 sync
1784 icbi r6,r3 /* flush the icache line */ 1820 icbi r6,r3 /* flush the icache line */
1785 cmpld 0,r6,r5 1821 cmpld 0,r6,r5
1786 blt 4b 1822 blt 4b
1787 sync 1823 sync
1788 addi r5,r5,8 1824 addi r5,r5,8
1789 addi r6,r6,8 1825 addi r6,r6,8
1790 blr 1826 blr
1791 1827
1792 .align 8 1828 .align 8
1793 copy_to_here: 1829 copy_to_here:
1794 1830
1795 #ifdef CONFIG_SMP 1831 #ifdef CONFIG_SMP
1796 #ifdef CONFIG_PPC_PMAC 1832 #ifdef CONFIG_PPC_PMAC
1797 /* 1833 /*
1798 * On PowerMac, secondary processors start from the reset vector, which 1834 * On PowerMac, secondary processors start from the reset vector, which
1799 * is temporarily turned into a call to one of the functions below. 1835 * is temporarily turned into a call to one of the functions below.
1800 */ 1836 */
1801 .section ".text"; 1837 .section ".text";
1802 .align 2 ; 1838 .align 2 ;
1803 1839
1804 .globl __secondary_start_pmac_0 1840 .globl __secondary_start_pmac_0
1805 __secondary_start_pmac_0: 1841 __secondary_start_pmac_0:
1806 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ 1842 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1807 li r24,0 1843 li r24,0
1808 b 1f 1844 b 1f
1809 li r24,1 1845 li r24,1
1810 b 1f 1846 b 1f
1811 li r24,2 1847 li r24,2
1812 b 1f 1848 b 1f
1813 li r24,3 1849 li r24,3
1814 1: 1850 1:
1815 1851
1816 _GLOBAL(pmac_secondary_start) 1852 _GLOBAL(pmac_secondary_start)
1817 /* turn on 64-bit mode */ 1853 /* turn on 64-bit mode */
1818 bl .enable_64b_mode 1854 bl .enable_64b_mode
1819 isync 1855 isync
1820 1856
1821 /* Copy some CPU settings from CPU 0 */ 1857 /* Copy some CPU settings from CPU 0 */
1822 bl .__restore_cpu_ppc970 1858 bl .__restore_cpu_ppc970
1823 1859
1824 /* pSeries does that early, though I don't think we really need it */ 1860 /* pSeries does that early, though I don't think we really need it */
1825 mfmsr r3 1861 mfmsr r3
1826 ori r3,r3,MSR_RI 1862 ori r3,r3,MSR_RI
1827 mtmsrd r3 /* RI on */ 1863 mtmsrd r3 /* RI on */
1828 1864
1829 /* Set up a paca value for this processor. */ 1865 /* Set up a paca value for this processor. */
1830 LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ 1866 LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */
1831 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1867 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1832 add r13,r13,r4 /* for this processor. */ 1868 add r13,r13,r4 /* for this processor. */
1833 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1869 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1834 1870
1835 /* Create a temp kernel stack for use before relocation is on. */ 1871 /* Create a temp kernel stack for use before relocation is on. */
1836 ld r1,PACAEMERGSP(r13) 1872 ld r1,PACAEMERGSP(r13)
1837 subi r1,r1,STACK_FRAME_OVERHEAD 1873 subi r1,r1,STACK_FRAME_OVERHEAD
1838 1874
1839 b .__secondary_start 1875 b .__secondary_start
1840 1876
1841 #endif /* CONFIG_PPC_PMAC */ 1877 #endif /* CONFIG_PPC_PMAC */
1842 1878
1843 /* 1879 /*
1844 * This function is called after the master CPU has released the 1880 * This function is called after the master CPU has released the
1845 * secondary processors. The execution environment is relocation off. 1881 * secondary processors. The execution environment is relocation off.
1846 * The paca for this processor has the following fields initialized at 1882 * The paca for this processor has the following fields initialized at
1847 * this point: 1883 * this point:
1848 * 1. Processor number 1884 * 1. Processor number
1849 * 2. Segment table pointer (virtual address) 1885 * 2. Segment table pointer (virtual address)
1850 * On entry the following are set: 1886 * On entry the following are set:
1851 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries 1887 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1852 * r24 = cpu# (in Linux terms) 1888 * r24 = cpu# (in Linux terms)
1853 * r13 = paca virtual address 1889 * r13 = paca virtual address
1854 * SPRG3 = paca virtual address 1890 * SPRG3 = paca virtual address
1855 */ 1891 */
1856 _GLOBAL(__secondary_start) 1892 _GLOBAL(__secondary_start)
1857 /* Set thread priority to MEDIUM */ 1893 /* Set thread priority to MEDIUM */
1858 HMT_MEDIUM 1894 HMT_MEDIUM
1859 1895
1860 /* Load TOC */ 1896 /* Load TOC */
1861 ld r2,PACATOC(r13) 1897 ld r2,PACATOC(r13)
1862 1898
1863 /* Do early setup for that CPU (stab, slb, hash table pointer) */ 1899 /* Do early setup for that CPU (stab, slb, hash table pointer) */
1864 bl .early_setup_secondary 1900 bl .early_setup_secondary
1865 1901
1866 /* Initialize the kernel stack. Just a repeat for iSeries. */ 1902 /* Initialize the kernel stack. Just a repeat for iSeries. */
1867 LOAD_REG_ADDR(r3, current_set) 1903 LOAD_REG_ADDR(r3, current_set)
1868 sldi r28,r24,3 /* get current_set[cpu#] */ 1904 sldi r28,r24,3 /* get current_set[cpu#] */
1869 ldx r1,r3,r28 1905 ldx r1,r3,r28
1870 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD 1906 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1871 std r1,PACAKSAVE(r13) 1907 std r1,PACAKSAVE(r13)
1872 1908
1873 /* Clear backchain so we get nice backtraces */ 1909 /* Clear backchain so we get nice backtraces */
1874 li r7,0 1910 li r7,0
1875 mtlr r7 1911 mtlr r7
1876 1912
1877 /* enable MMU and jump to start_secondary */ 1913 /* enable MMU and jump to start_secondary */
1878 LOAD_REG_ADDR(r3, .start_secondary_prolog) 1914 LOAD_REG_ADDR(r3, .start_secondary_prolog)
1879 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 1915 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1880 #ifdef DO_SOFT_DISABLE 1916 #ifdef CONFIG_PPC_ISERIES
1881 BEGIN_FW_FTR_SECTION 1917 BEGIN_FW_FTR_SECTION
1882 ori r4,r4,MSR_EE 1918 ori r4,r4,MSR_EE
1883 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 1919 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1884 #endif 1920 #endif
1921 BEGIN_FW_FTR_SECTION
1922 stb r7,PACASOFTIRQEN(r13)
1923 stb r7,PACAHARDIRQEN(r13)
1924 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1925
1885 mtspr SPRN_SRR0,r3 1926 mtspr SPRN_SRR0,r3
1886 mtspr SPRN_SRR1,r4 1927 mtspr SPRN_SRR1,r4
1887 rfid 1928 rfid
1888 b . /* prevent speculative execution */ 1929 b . /* prevent speculative execution */
1889 1930
1890 /* 1931 /*
1891 * Running with relocation on at this point. All we want to do is 1932 * Running with relocation on at this point. All we want to do is
1892 * zero the stack back-chain pointer before going into C code. 1933 * zero the stack back-chain pointer before going into C code.
1893 */ 1934 */
1894 _GLOBAL(start_secondary_prolog) 1935 _GLOBAL(start_secondary_prolog)
1895 li r3,0 1936 li r3,0
1896 std r3,0(r1) /* Zero the stack frame pointer */ 1937 std r3,0(r1) /* Zero the stack frame pointer */
1897 bl .start_secondary 1938 bl .start_secondary
1898 b . 1939 b .
1899 #endif 1940 #endif
1900 1941
1901 /* 1942 /*
1902 * This subroutine clobbers r11 and r12 1943 * This subroutine clobbers r11 and r12
1903 */ 1944 */
1904 _GLOBAL(enable_64b_mode) 1945 _GLOBAL(enable_64b_mode)
1905 mfmsr r11 /* grab the current MSR */ 1946 mfmsr r11 /* grab the current MSR */
1906 li r12,1 1947 li r12,1
1907 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) 1948 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1908 or r11,r11,r12 1949 or r11,r11,r12
1909 li r12,1 1950 li r12,1
1910 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) 1951 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1911 or r11,r11,r12 1952 or r11,r11,r12
1912 mtmsrd r11 1953 mtmsrd r11
1913 isync 1954 isync
1914 blr 1955 blr
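The two li/rldicr pairs just build one-bit masks; the routine is equivalent to (a C sketch, with mfmsr()/mtmsrd() standing for the instructions):

	unsigned long msr = mfmsr();
	msr |= 1UL << MSR_SF_LG;	/* run in 64-bit mode */
	msr |= 1UL << MSR_ISF_LG;	/* take exceptions in 64-bit mode */
	mtmsrd(msr);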
1915 1956
1916 #ifdef CONFIG_PPC_MULTIPLATFORM 1957 #ifdef CONFIG_PPC_MULTIPLATFORM
1917 /* 1958 /*
1918 * This is where the main kernel code starts. 1959 * This is where the main kernel code starts.
1919 */ 1960 */
1920 _STATIC(start_here_multiplatform) 1961 _STATIC(start_here_multiplatform)
1921 /* get a new offset, now that the kernel has moved. */ 1962 /* get a new offset, now that the kernel has moved. */
1922 bl .reloc_offset 1963 bl .reloc_offset
1923 mr r26,r3 1964 mr r26,r3
1924 1965
1925 /* Clear out the BSS. It may have been done in prom_init 1966 /* Clear out the BSS. It may have been done in prom_init
1926 * already, but that's irrelevant since prom_init will soon 1967 * already, but that's irrelevant since prom_init will soon
1927 * be detached from the kernel completely. Besides, we need 1968 * be detached from the kernel completely. Besides, we need
1928 * to clear it now for kexec-style entry. 1969 * to clear it now for kexec-style entry.
1929 */ 1970 */
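The loop below is a real-mode-safe memset(0); in C it amounts to (a sketch, using the same __bss_start/__bss_stop linker symbols):

	/* round the BSS size up to whole doublewords and zero them */
	unsigned long ndw = ((unsigned long)__bss_stop
			     - (unsigned long)__bss_start + 7) >> 3;
	u64 *p = (u64 *)__bss_start;
	while (ndw--)
		*p++ = 0;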
1930 LOAD_REG_IMMEDIATE(r11,__bss_stop) 1971 LOAD_REG_IMMEDIATE(r11,__bss_stop)
1931 LOAD_REG_IMMEDIATE(r8,__bss_start) 1972 LOAD_REG_IMMEDIATE(r8,__bss_start)
1932 sub r11,r11,r8 /* bss size */ 1973 sub r11,r11,r8 /* bss size */
1933 addi r11,r11,7 /* round up to an even double word */ 1974 addi r11,r11,7 /* round up to an even double word */
1934 rldicl. r11,r11,61,3 /* shift right by 3 */ 1975 rldicl. r11,r11,61,3 /* shift right by 3 */
1935 beq 4f 1976 beq 4f
1936 addi r8,r8,-8 1977 addi r8,r8,-8
1937 li r0,0 1978 li r0,0
1938 mtctr r11 /* zero this many doublewords */ 1979 mtctr r11 /* zero this many doublewords */
1939 3: stdu r0,8(r8) 1980 3: stdu r0,8(r8)
1940 bdnz 3b 1981 bdnz 3b
1941 4: 1982 4:
1942 1983
1943 mfmsr r6 1984 mfmsr r6
1944 ori r6,r6,MSR_RI 1985 ori r6,r6,MSR_RI
1945 mtmsrd r6 /* RI on */ 1986 mtmsrd r6 /* RI on */
1946 1987
1947 /* The following gets the stack and TOC set up with the regs */ 1988 /* The following gets the stack and TOC set up with the regs */
1948 /* pointing to the real addr of the kernel stack. This is */ 1989 /* pointing to the real addr of the kernel stack. This is */
1949 /* all done to support the C function call below which sets */ 1990 /* all done to support the C function call below which sets */
1950 /* up the htab. This is done because we have relocated the */ 1991 /* up the htab. This is done because we have relocated the */
1951 /* kernel but are still running in real mode. */ 1992 /* kernel but are still running in real mode. */
1952 1993
1953 LOAD_REG_IMMEDIATE(r3,init_thread_union) 1994 LOAD_REG_IMMEDIATE(r3,init_thread_union)
1954 add r3,r3,r26 1995 add r3,r3,r26
1955 1996
1956 /* set up a stack pointer (physical address) */ 1997 /* set up a stack pointer (physical address) */
1957 addi r1,r3,THREAD_SIZE 1998 addi r1,r3,THREAD_SIZE
1958 li r0,0 1999 li r0,0
1959 stdu r0,-STACK_FRAME_OVERHEAD(r1) 2000 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1960 2001
1961 /* set up the TOC (physical address) */ 2002 /* set up the TOC (physical address) */
1962 LOAD_REG_IMMEDIATE(r2,__toc_start) 2003 LOAD_REG_IMMEDIATE(r2,__toc_start)
1963 addi r2,r2,0x4000 2004 addi r2,r2,0x4000
1964 addi r2,r2,0x4000 2005 addi r2,r2,0x4000
1965 add r2,r2,r26 2006 add r2,r2,r26
1966 2007
1967 LOAD_REG_IMMEDIATE(r3, cpu_specs) 2008 LOAD_REG_IMMEDIATE(r3, cpu_specs)
1968 add r3,r3,r26 2009 add r3,r3,r26
1969 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) 2010 LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
1970 add r4,r4,r26 2011 add r4,r4,r26
1971 mr r5,r26 2012 mr r5,r26
1972 bl .identify_cpu 2013 bl .identify_cpu
1973 2014
1974 /* Do very early kernel initializations, including initial hash table, 2015 /* Do very early kernel initializations, including initial hash table,
1975 * stab and slb setup before we turn on relocation. */ 2016 * stab and slb setup before we turn on relocation. */
1976 2017
1977 /* Restore parameters passed from prom_init/kexec */ 2018 /* Restore parameters passed from prom_init/kexec */
1978 mr r3,r31 2019 mr r3,r31
1979 bl .early_setup 2020 bl .early_setup
1980 2021
1981 LOAD_REG_IMMEDIATE(r3, .start_here_common) 2022 LOAD_REG_IMMEDIATE(r3, .start_here_common)
1982 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 2023 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1983 mtspr SPRN_SRR0,r3 2024 mtspr SPRN_SRR0,r3
1984 mtspr SPRN_SRR1,r4 2025 mtspr SPRN_SRR1,r4
1985 rfid 2026 rfid
1986 b . /* prevent speculative execution */ 2027 b . /* prevent speculative execution */
1987 #endif /* CONFIG_PPC_MULTIPLATFORM */ 2028 #endif /* CONFIG_PPC_MULTIPLATFORM */
1988 2029
1989 /* This is where all platforms converge execution */ 2030 /* This is where all platforms converge execution */
1990 _STATIC(start_here_common) 2031 _STATIC(start_here_common)
1991 /* relocation is on at this point */ 2032 /* relocation is on at this point */
1992 2033
1993 /* The following code sets up the SP and TOC now that we are */ 2034 /* The following code sets up the SP and TOC now that we are */
1994 /* running with translation enabled. */ 2035 /* running with translation enabled. */
1995 2036
1996 LOAD_REG_IMMEDIATE(r3,init_thread_union) 2037 LOAD_REG_IMMEDIATE(r3,init_thread_union)
1997 2038
1998 /* set up the stack */ 2039 /* set up the stack */
1999 addi r1,r3,THREAD_SIZE 2040 addi r1,r3,THREAD_SIZE
2000 li r0,0 2041 li r0,0
2001 stdu r0,-STACK_FRAME_OVERHEAD(r1) 2042 stdu r0,-STACK_FRAME_OVERHEAD(r1)
2002 2043
2003 /* Apply the CPU-specific fixups (nop out sections not relevant 2044 /* Apply the CPU-specific fixups (nop out sections not relevant
2004 * to this CPU) 2045 * to this CPU)
2005 */ 2046 */
2006 li r3,0 2047 li r3,0
2007 bl .do_cpu_ftr_fixups 2048 bl .do_cpu_ftr_fixups
2008 bl .do_fw_ftr_fixups 2049 bl .do_fw_ftr_fixups
2009 2050
2010 /* ptr to current */ 2051 /* ptr to current */
2011 LOAD_REG_IMMEDIATE(r4, init_task) 2052 LOAD_REG_IMMEDIATE(r4, init_task)
2012 std r4,PACACURRENT(r13) 2053 std r4,PACACURRENT(r13)
2013 2054
2014 /* Load the TOC */ 2055 /* Load the TOC */
2015 ld r2,PACATOC(r13) 2056 ld r2,PACATOC(r13)
2016 std r1,PACAKSAVE(r13) 2057 std r1,PACAKSAVE(r13)
2017 2058
2018 bl .setup_system 2059 bl .setup_system
2019 2060
2020 /* Load up the kernel context */ 2061 /* Load up the kernel context */
2021 5: 2062 5:
2022 #ifdef DO_SOFT_DISABLE
2023 BEGIN_FW_FTR_SECTION
2024 li r5,0 2063 li r5,0
2025 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ 2064 stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */
2065 #ifdef CONFIG_PPC_ISERIES
2066 BEGIN_FW_FTR_SECTION
2026 mfmsr r5 2067 mfmsr r5
2027 ori r5,r5,MSR_EE /* Hard Enabled */ 2068 ori r5,r5,MSR_EE /* Hard Enabled */
2028 mtmsrd r5 2069 mtmsrd r5
2029 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 2070 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
2030 #endif 2071 #endif
2072 BEGIN_FW_FTR_SECTION
2073 stb r5,PACAHARDIRQEN(r13)
2074 END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
2031 2075
2032 bl .start_kernel 2076 bl .start_kernel
2033 2077
2034 /* Not reached */ 2078 /* Not reached */
2035 BUG_OPCODE 2079 BUG_OPCODE
2036 2080
2037 /* 2081 /*
2038 * We put a few things here that have to be page-aligned. 2082 * We put a few things here that have to be page-aligned.
2039 * This stuff goes at the beginning of the bss, which is page-aligned. 2083 * This stuff goes at the beginning of the bss, which is page-aligned.
2040 */ 2084 */
2041 .section ".bss" 2085 .section ".bss"
2042 2086
arch/powerpc/kernel/idle_power4.S
1 /* 1 /*
2 * This file contains the power_save function for 970-family CPUs. 2 * This file contains the power_save function for 970-family CPUs.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10 #include <linux/threads.h> 10 #include <linux/threads.h>
11 #include <asm/processor.h> 11 #include <asm/processor.h>
12 #include <asm/page.h> 12 #include <asm/page.h>
13 #include <asm/cputable.h> 13 #include <asm/cputable.h>
14 #include <asm/thread_info.h> 14 #include <asm/thread_info.h>
15 #include <asm/ppc_asm.h> 15 #include <asm/ppc_asm.h>
16 #include <asm/asm-offsets.h> 16 #include <asm/asm-offsets.h>
17 17
18 #undef DEBUG 18 #undef DEBUG
19 19
20 .text 20 .text
21 21
22 _GLOBAL(power4_idle) 22 _GLOBAL(power4_idle)
23 BEGIN_FTR_SECTION 23 BEGIN_FTR_SECTION
24 blr 24 blr
25 END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) 25 END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
26 /* Now check if user or arch enabled NAP mode */ 26 /* Now check if user or arch enabled NAP mode */
27 LOAD_REG_ADDRBASE(r3,powersave_nap) 27 LOAD_REG_ADDRBASE(r3,powersave_nap)
28 lwz r4,ADDROFF(powersave_nap)(r3) 28 lwz r4,ADDROFF(powersave_nap)(r3)
29 cmpwi 0,r4,0 29 cmpwi 0,r4,0
30 beqlr 30 beqlr
31 31
32 /* Go to NAP now */ 32 /* Go to NAP now */
33 mfmsr r7
34 rldicl r0,r7,48,1
35 rotldi r0,r0,16
36 mtmsrd r0,1 /* hard-disable interrupts */
37 li r0,1
38 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
39 stb r0,PACAHARDIRQEN(r13)
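The rotate sequence above is just a mask-free way of clearing MSR_EE; together with the new flag stores it is equivalent to (a C sketch; mtmsrd() is shown as a two-argument helper matching the instruction):

	mtmsrd(mfmsr() & ~MSR_EE, 1);		/* hard-disable EE */
	get_paca()->soft_enabled = 1;		/* PACASOFTIRQEN */
	get_paca()->hard_enabled = 1;		/* PACAHARDIRQEN: EE comes back
						 * on when NAP is entered below */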
33 BEGIN_FTR_SECTION 40 BEGIN_FTR_SECTION
34 DSSALL 41 DSSALL
35 sync 42 sync
36 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 43 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
37 clrrdi r9,r1,THREAD_SHIFT /* current thread_info */ 44 clrrdi r9,r1,THREAD_SHIFT /* current thread_info */
38 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ 45 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
39 ori r8,r8,_TLF_NAPPING /* so when we take an exception */ 46 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
40 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ 47 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
41 mfmsr r7
42 ori r7,r7,MSR_EE 48 ori r7,r7,MSR_EE
43 oris r7,r7,MSR_POW@h 49 oris r7,r7,MSR_POW@h
44 1: sync 50 1: sync
45 isync 51 isync
46 mtmsrd r7 52 mtmsrd r7
47 isync 53 isync
48 b 1b 54 b 1b
49 55
arch/powerpc/kernel/irq.c
1 /* 1 /*
2 * Derived from arch/i386/kernel/irq.c 2 * Derived from arch/i386/kernel/irq.c
3 * Copyright (C) 1992 Linus Torvalds 3 * Copyright (C) 1992 Linus Torvalds
4 * Adapted from arch/i386 by Gary Thomas 4 * Adapted from arch/i386 by Gary Thomas
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Updated and modified by Cort Dougan <cort@fsmlabs.com> 6 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
7 * Copyright (C) 1996-2001 Cort Dougan 7 * Copyright (C) 1996-2001 Cort Dougan
8 * Adapted for Power Macintosh by Paul Mackerras 8 * Adapted for Power Macintosh by Paul Mackerras
9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) 9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
10 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). 10 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
11 * 11 *
12 * This program is free software; you can redistribute it and/or 12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 * 16 *
17 * This file contains the code used by various IRQ handling routines: 17 * This file contains the code used by various IRQ handling routines:
18 * asking for different IRQ's should be done through these routines 18 * asking for different IRQ's should be done through these routines
19 * instead of just grabbing them. Thus setups with different IRQ numbers 19 * instead of just grabbing them. Thus setups with different IRQ numbers
20 * shouldn't result in any weird surprises, and installing new handlers 20 * shouldn't result in any weird surprises, and installing new handlers
21 * should be easier. 21 * should be easier.
22 * 22 *
23 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the 23 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
24 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit 24 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
25 * mask register (of which only 16 are defined), hence the weird shifting 25 * mask register (of which only 16 are defined), hence the weird shifting
26 * and complement of the cached_irq_mask. I want to be able to stuff 26 * and complement of the cached_irq_mask. I want to be able to stuff
27 * this right into the SIU SMASK register. 27 * this right into the SIU SMASK register.
28 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx 28 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
29 * to reduce code space and undefined function references. 29 * to reduce code space and undefined function references.
30 */ 30 */
31 31
32 #undef DEBUG 32 #undef DEBUG
33 33
34 #include <linux/module.h> 34 #include <linux/module.h>
35 #include <linux/threads.h> 35 #include <linux/threads.h>
36 #include <linux/kernel_stat.h> 36 #include <linux/kernel_stat.h>
37 #include <linux/signal.h> 37 #include <linux/signal.h>
38 #include <linux/sched.h> 38 #include <linux/sched.h>
39 #include <linux/ptrace.h> 39 #include <linux/ptrace.h>
40 #include <linux/ioport.h> 40 #include <linux/ioport.h>
41 #include <linux/interrupt.h> 41 #include <linux/interrupt.h>
42 #include <linux/timex.h> 42 #include <linux/timex.h>
43 #include <linux/init.h> 43 #include <linux/init.h>
44 #include <linux/slab.h> 44 #include <linux/slab.h>
45 #include <linux/delay.h> 45 #include <linux/delay.h>
46 #include <linux/irq.h> 46 #include <linux/irq.h>
47 #include <linux/seq_file.h> 47 #include <linux/seq_file.h>
48 #include <linux/cpumask.h> 48 #include <linux/cpumask.h>
49 #include <linux/profile.h> 49 #include <linux/profile.h>
50 #include <linux/bitops.h> 50 #include <linux/bitops.h>
51 #include <linux/list.h> 51 #include <linux/list.h>
52 #include <linux/radix-tree.h> 52 #include <linux/radix-tree.h>
53 #include <linux/mutex.h> 53 #include <linux/mutex.h>
54 #include <linux/bootmem.h> 54 #include <linux/bootmem.h>
55 #include <linux/pci.h> 55 #include <linux/pci.h>
56 56
57 #include <asm/uaccess.h> 57 #include <asm/uaccess.h>
58 #include <asm/system.h> 58 #include <asm/system.h>
59 #include <asm/io.h> 59 #include <asm/io.h>
60 #include <asm/pgtable.h> 60 #include <asm/pgtable.h>
61 #include <asm/irq.h> 61 #include <asm/irq.h>
62 #include <asm/cache.h> 62 #include <asm/cache.h>
63 #include <asm/prom.h> 63 #include <asm/prom.h>
64 #include <asm/ptrace.h> 64 #include <asm/ptrace.h>
65 #include <asm/machdep.h> 65 #include <asm/machdep.h>
66 #include <asm/udbg.h> 66 #include <asm/udbg.h>
67 #ifdef CONFIG_PPC_ISERIES 67 #ifdef CONFIG_PPC64
68 #include <asm/paca.h> 68 #include <asm/paca.h>
69 #include <asm/firmware.h>
69 #endif 70 #endif
70 71
71 int __irq_offset_value; 72 int __irq_offset_value;
72 static int ppc_spurious_interrupts; 73 static int ppc_spurious_interrupts;
73 74
74 #ifdef CONFIG_PPC32 75 #ifdef CONFIG_PPC32
75 EXPORT_SYMBOL(__irq_offset_value); 76 EXPORT_SYMBOL(__irq_offset_value);
76 atomic_t ppc_n_lost_interrupts; 77 atomic_t ppc_n_lost_interrupts;
77 78
78 #ifndef CONFIG_PPC_MERGE 79 #ifndef CONFIG_PPC_MERGE
79 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 80 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
80 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 81 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
81 #endif 82 #endif
82 83
83 #ifdef CONFIG_TAU_INT 84 #ifdef CONFIG_TAU_INT
84 extern int tau_initialized; 85 extern int tau_initialized;
85 extern int tau_interrupts(int); 86 extern int tau_interrupts(int);
86 #endif 87 #endif
87 #endif /* CONFIG_PPC32 */ 88 #endif /* CONFIG_PPC32 */
88 89
89 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE) 90 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
90 extern atomic_t ipi_recv; 91 extern atomic_t ipi_recv;
91 extern atomic_t ipi_sent; 92 extern atomic_t ipi_sent;
92 #endif 93 #endif
93 94
94 #ifdef CONFIG_PPC64 95 #ifdef CONFIG_PPC64
95 EXPORT_SYMBOL(irq_desc); 96 EXPORT_SYMBOL(irq_desc);
96 97
97 int distribute_irqs = 1; 98 int distribute_irqs = 1;
99
100 void local_irq_restore(unsigned long en)
101 {
102 get_paca()->soft_enabled = en;
103 if (!en)
104 return;
105
106 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
107 if (get_paca()->lppaca_ptr->int_dword.any_int)
108 iseries_handle_interrupts();
109 return;
110 }
111
112 if (get_paca()->hard_enabled)
113 return;
114 /* need to hard-enable interrupts here */
115 get_paca()->hard_enabled = en;
116 if ((int)mfspr(SPRN_DEC) < 0)
117 mtspr(SPRN_DEC, 1);
118 hard_irq_enable();
119 }
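Callers are unchanged: the usual pairing still applies, only the point where the MSR is actually written moves into local_irq_restore() above (a sketch, assuming local_irq_save() on this path just reads and clears paca->soft_enabled):

	unsigned long flags;

	local_irq_save(flags);		/* soft-disable: paca flag only */
	/* ... an interrupt arriving here clears hard_enabled and EE ... */
	local_irq_restore(flags);	/* sets soft_enabled, and hard-enables
					 * again only if that happened */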
98 #endif /* CONFIG_PPC64 */ 120 #endif /* CONFIG_PPC64 */
99 121
100 int show_interrupts(struct seq_file *p, void *v) 122 int show_interrupts(struct seq_file *p, void *v)
101 { 123 {
102 int i = *(loff_t *)v, j; 124 int i = *(loff_t *)v, j;
103 struct irqaction *action; 125 struct irqaction *action;
104 irq_desc_t *desc; 126 irq_desc_t *desc;
105 unsigned long flags; 127 unsigned long flags;
106 128
107 if (i == 0) { 129 if (i == 0) {
108 seq_puts(p, " "); 130 seq_puts(p, " ");
109 for_each_online_cpu(j) 131 for_each_online_cpu(j)
110 seq_printf(p, "CPU%d ", j); 132 seq_printf(p, "CPU%d ", j);
111 seq_putc(p, '\n'); 133 seq_putc(p, '\n');
112 } 134 }
113 135
114 if (i < NR_IRQS) { 136 if (i < NR_IRQS) {
115 desc = get_irq_desc(i); 137 desc = get_irq_desc(i);
116 spin_lock_irqsave(&desc->lock, flags); 138 spin_lock_irqsave(&desc->lock, flags);
117 action = desc->action; 139 action = desc->action;
118 if (!action || !action->handler) 140 if (!action || !action->handler)
119 goto skip; 141 goto skip;
120 seq_printf(p, "%3d: ", i); 142 seq_printf(p, "%3d: ", i);
121 #ifdef CONFIG_SMP 143 #ifdef CONFIG_SMP
122 for_each_online_cpu(j) 144 for_each_online_cpu(j)
123 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 145 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
124 #else 146 #else
125 seq_printf(p, "%10u ", kstat_irqs(i)); 147 seq_printf(p, "%10u ", kstat_irqs(i));
126 #endif /* CONFIG_SMP */ 148 #endif /* CONFIG_SMP */
127 if (desc->chip) 149 if (desc->chip)
128 seq_printf(p, " %s ", desc->chip->typename); 150 seq_printf(p, " %s ", desc->chip->typename);
129 else 151 else
130 seq_puts(p, " None "); 152 seq_puts(p, " None ");
131 seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge "); 153 seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
132 seq_printf(p, " %s", action->name); 154 seq_printf(p, " %s", action->name);
133 for (action = action->next; action; action = action->next) 155 for (action = action->next; action; action = action->next)
134 seq_printf(p, ", %s", action->name); 156 seq_printf(p, ", %s", action->name);
135 seq_putc(p, '\n'); 157 seq_putc(p, '\n');
136 skip: 158 skip:
137 spin_unlock_irqrestore(&desc->lock, flags); 159 spin_unlock_irqrestore(&desc->lock, flags);
138 } else if (i == NR_IRQS) { 160 } else if (i == NR_IRQS) {
139 #ifdef CONFIG_PPC32 161 #ifdef CONFIG_PPC32
140 #ifdef CONFIG_TAU_INT 162 #ifdef CONFIG_TAU_INT
141 if (tau_initialized){ 163 if (tau_initialized){
142 seq_puts(p, "TAU: "); 164 seq_puts(p, "TAU: ");
143 for_each_online_cpu(j) 165 for_each_online_cpu(j)
144 seq_printf(p, "%10u ", tau_interrupts(j)); 166 seq_printf(p, "%10u ", tau_interrupts(j));
145 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 167 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
146 } 168 }
147 #endif 169 #endif
148 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE) 170 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
149 /* should this be per processor send/receive? */ 171 /* should this be per processor send/receive? */
150 seq_printf(p, "IPI (recv/sent): %10u/%u\n", 172 seq_printf(p, "IPI (recv/sent): %10u/%u\n",
151 atomic_read(&ipi_recv), atomic_read(&ipi_sent)); 173 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
152 #endif 174 #endif
153 #endif /* CONFIG_PPC32 */ 175 #endif /* CONFIG_PPC32 */
154 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); 176 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
155 } 177 }
156 return 0; 178 return 0;
157 } 179 }
158 180
159 #ifdef CONFIG_HOTPLUG_CPU 181 #ifdef CONFIG_HOTPLUG_CPU
160 void fixup_irqs(cpumask_t map) 182 void fixup_irqs(cpumask_t map)
161 { 183 {
162 unsigned int irq; 184 unsigned int irq;
163 static int warned; 185 static int warned;
164 186
165 for_each_irq(irq) { 187 for_each_irq(irq) {
166 cpumask_t mask; 188 cpumask_t mask;
167 189
168 if (irq_desc[irq].status & IRQ_PER_CPU) 190 if (irq_desc[irq].status & IRQ_PER_CPU)
169 continue; 191 continue;
170 192
171 cpus_and(mask, irq_desc[irq].affinity, map); 193 cpus_and(mask, irq_desc[irq].affinity, map);
172 if (any_online_cpu(mask) == NR_CPUS) { 194 if (any_online_cpu(mask) == NR_CPUS) {
173 printk("Breaking affinity for irq %i\n", irq); 195 printk("Breaking affinity for irq %i\n", irq);
174 mask = map; 196 mask = map;
175 } 197 }
176 if (irq_desc[irq].chip->set_affinity) 198 if (irq_desc[irq].chip->set_affinity)
177 irq_desc[irq].chip->set_affinity(irq, mask); 199 irq_desc[irq].chip->set_affinity(irq, mask);
178 else if (irq_desc[irq].action && !(warned++)) 200 else if (irq_desc[irq].action && !(warned++))
179 printk("Cannot set affinity for irq %i\n", irq); 201 printk("Cannot set affinity for irq %i\n", irq);
180 } 202 }
181 203
182 local_irq_enable(); 204 local_irq_enable();
183 mdelay(1); 205 mdelay(1);
184 local_irq_disable(); 206 local_irq_disable();
185 } 207 }
186 #endif 208 #endif
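fixup_irqs() above is the CPU-hotplug helper: for every interrupt that is not per-CPU it intersects the current affinity with the surviving-CPU map and, if nothing would be left, breaks affinity so the interrupt keeps being serviced. A hedged sketch of a caller in a platform's CPU-offline path (the function name and surrounding context are assumptions, not part of this patch):

/* Hypothetical sketch: migrate interrupts away from the CPU going offline. */
int example_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);		/* this CPU is no longer a valid target */
	fixup_irqs(cpu_online_map);		/* rewrite affinities to the remaining CPUs */
	return 0;
}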
187 209
188 void do_IRQ(struct pt_regs *regs) 210 void do_IRQ(struct pt_regs *regs)
189 { 211 {
190 struct pt_regs *old_regs = set_irq_regs(regs); 212 struct pt_regs *old_regs = set_irq_regs(regs);
191 unsigned int irq; 213 unsigned int irq;
192 #ifdef CONFIG_IRQSTACKS 214 #ifdef CONFIG_IRQSTACKS
193 struct thread_info *curtp, *irqtp; 215 struct thread_info *curtp, *irqtp;
194 #endif 216 #endif
195 217
196 irq_enter(); 218 irq_enter();
197 219
198 #ifdef CONFIG_DEBUG_STACKOVERFLOW 220 #ifdef CONFIG_DEBUG_STACKOVERFLOW
199 /* Debugging check for stack overflow: is there less than 2KB free? */ 221 /* Debugging check for stack overflow: is there less than 2KB free? */
200 { 222 {
201 long sp; 223 long sp;
202 224
203 sp = __get_SP() & (THREAD_SIZE-1); 225 sp = __get_SP() & (THREAD_SIZE-1);
204 226
205 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { 227 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
206 printk("do_IRQ: stack overflow: %ld\n", 228 printk("do_IRQ: stack overflow: %ld\n",
207 sp - sizeof(struct thread_info)); 229 sp - sizeof(struct thread_info));
208 dump_stack(); 230 dump_stack();
209 } 231 }
210 } 232 }
211 #endif 233 #endif
212 234
213 /* 235 /*
214 * Every platform is required to implement ppc_md.get_irq. 236 * Every platform is required to implement ppc_md.get_irq.
215 * This function will either return an irq number or -1 to 237 * This function will either return an irq number or -1 to
216 * indicate there are no more pending. 238 * indicate there are no more pending.
217 * The value -2 is for buggy hardware and means that this IRQ 239 * The value -2 is for buggy hardware and means that this IRQ
218 * has already been handled. -- Tom 240 * has already been handled. -- Tom
219 */ 241 */
220 irq = ppc_md.get_irq(); 242 irq = ppc_md.get_irq();
221 243
222 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { 244 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
223 #ifdef CONFIG_IRQSTACKS 245 #ifdef CONFIG_IRQSTACKS
224 /* Switch to the irq stack to handle this */ 246 /* Switch to the irq stack to handle this */
225 curtp = current_thread_info(); 247 curtp = current_thread_info();
226 irqtp = hardirq_ctx[smp_processor_id()]; 248 irqtp = hardirq_ctx[smp_processor_id()];
227 if (curtp != irqtp) { 249 if (curtp != irqtp) {
228 struct irq_desc *desc = irq_desc + irq; 250 struct irq_desc *desc = irq_desc + irq;
229 void *handler = desc->handle_irq; 251 void *handler = desc->handle_irq;
230 if (handler == NULL) 252 if (handler == NULL)
231 handler = &__do_IRQ; 253 handler = &__do_IRQ;
232 irqtp->task = curtp->task; 254 irqtp->task = curtp->task;
233 irqtp->flags = 0; 255 irqtp->flags = 0;
234 call_handle_irq(irq, desc, irqtp, handler); 256 call_handle_irq(irq, desc, irqtp, handler);
235 irqtp->task = NULL; 257 irqtp->task = NULL;
236 if (irqtp->flags) 258 if (irqtp->flags)
237 set_bits(irqtp->flags, &curtp->flags); 259 set_bits(irqtp->flags, &curtp->flags);
238 } else 260 } else
239 #endif 261 #endif
240 generic_handle_irq(irq); 262 generic_handle_irq(irq);
241 } else if (irq != NO_IRQ_IGNORE) 263 } else if (irq != NO_IRQ_IGNORE)
242 /* That's not SMP safe ... but who cares ? */ 264 /* That's not SMP safe ... but who cares ? */
243 ppc_spurious_interrupts++; 265 ppc_spurious_interrupts++;
244 266
245 irq_exit(); 267 irq_exit();
246 set_irq_regs(old_regs); 268 set_irq_regs(old_regs);
247 269
248 #ifdef CONFIG_PPC_ISERIES 270 #ifdef CONFIG_PPC_ISERIES
249 if (get_lppaca()->int_dword.fields.decr_int) { 271 if (get_lppaca()->int_dword.fields.decr_int) {
250 get_lppaca()->int_dword.fields.decr_int = 0; 272 get_lppaca()->int_dword.fields.decr_int = 0;
251 /* Signal a fake decrementer interrupt */ 273 /* Signal a fake decrementer interrupt */
252 timer_interrupt(regs); 274 timer_interrupt(regs);
253 } 275 }
254 #endif 276 #endif
255 } 277 }
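do_IRQ() above delegates interrupt discovery to the platform via ppc_md.get_irq(). A minimal sketch of such a hook, with all example_* names invented (NO_IRQ and irq_linear_revmap() are the only identifiers taken from this file; example_pic_host would be an irq_host registered as sketched further down):

/* Hypothetical hook installed as ppc_md.get_irq. */
static unsigned int example_pic_get_irq(void)
{
	unsigned int hwirq = example_pic_read_ack();	/* assumed ack-register read */

	if (hwirq == EXAMPLE_PIC_SPURIOUS)		/* assumed "nothing pending" value */
		return NO_IRQ;

	/* Translate the hardware number into a virtual irq. */
	return irq_linear_revmap(example_pic_host, hwirq);
}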
256 278
257 void __init init_IRQ(void) 279 void __init init_IRQ(void)
258 { 280 {
259 ppc_md.init_IRQ(); 281 ppc_md.init_IRQ();
260 #ifdef CONFIG_PPC64 282 #ifdef CONFIG_PPC64
261 irq_ctx_init(); 283 irq_ctx_init();
262 #endif 284 #endif
263 } 285 }
264 286
265 287
266 #ifdef CONFIG_IRQSTACKS 288 #ifdef CONFIG_IRQSTACKS
267 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; 289 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
268 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; 290 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
269 291
270 void irq_ctx_init(void) 292 void irq_ctx_init(void)
271 { 293 {
272 struct thread_info *tp; 294 struct thread_info *tp;
273 int i; 295 int i;
274 296
275 for_each_possible_cpu(i) { 297 for_each_possible_cpu(i) {
276 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 298 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
277 tp = softirq_ctx[i]; 299 tp = softirq_ctx[i];
278 tp->cpu = i; 300 tp->cpu = i;
279 tp->preempt_count = SOFTIRQ_OFFSET; 301 tp->preempt_count = SOFTIRQ_OFFSET;
280 302
281 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 303 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
282 tp = hardirq_ctx[i]; 304 tp = hardirq_ctx[i];
283 tp->cpu = i; 305 tp->cpu = i;
284 tp->preempt_count = HARDIRQ_OFFSET; 306 tp->preempt_count = HARDIRQ_OFFSET;
285 } 307 }
286 } 308 }
287 309
288 static inline void do_softirq_onstack(void) 310 static inline void do_softirq_onstack(void)
289 { 311 {
290 struct thread_info *curtp, *irqtp; 312 struct thread_info *curtp, *irqtp;
291 313
292 curtp = current_thread_info(); 314 curtp = current_thread_info();
293 irqtp = softirq_ctx[smp_processor_id()]; 315 irqtp = softirq_ctx[smp_processor_id()];
294 irqtp->task = curtp->task; 316 irqtp->task = curtp->task;
295 call_do_softirq(irqtp); 317 call_do_softirq(irqtp);
296 irqtp->task = NULL; 318 irqtp->task = NULL;
297 } 319 }
298 320
299 #else 321 #else
300 #define do_softirq_onstack() __do_softirq() 322 #define do_softirq_onstack() __do_softirq()
301 #endif /* CONFIG_IRQSTACKS */ 323 #endif /* CONFIG_IRQSTACKS */
302 324
303 void do_softirq(void) 325 void do_softirq(void)
304 { 326 {
305 unsigned long flags; 327 unsigned long flags;
306 328
307 if (in_interrupt()) 329 if (in_interrupt())
308 return; 330 return;
309 331
310 local_irq_save(flags); 332 local_irq_save(flags);
311 333
312 if (local_softirq_pending()) 334 if (local_softirq_pending())
313 do_softirq_onstack(); 335 do_softirq_onstack();
314 336
315 local_irq_restore(flags); 337 local_irq_restore(flags);
316 } 338 }
317 EXPORT_SYMBOL(do_softirq); 339 EXPORT_SYMBOL(do_softirq);
318 340
319 341
320 /* 342 /*
321 * IRQ controller and virtual interrupts 343 * IRQ controller and virtual interrupts
322 */ 344 */
323 345
324 #ifdef CONFIG_PPC_MERGE 346 #ifdef CONFIG_PPC_MERGE
325 347
326 static LIST_HEAD(irq_hosts); 348 static LIST_HEAD(irq_hosts);
327 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED; 349 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
328 static DEFINE_PER_CPU(unsigned int, irq_radix_reader); 350 static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
329 static unsigned int irq_radix_writer; 351 static unsigned int irq_radix_writer;
330 struct irq_map_entry irq_map[NR_IRQS]; 352 struct irq_map_entry irq_map[NR_IRQS];
331 static unsigned int irq_virq_count = NR_IRQS; 353 static unsigned int irq_virq_count = NR_IRQS;
332 static struct irq_host *irq_default_host; 354 static struct irq_host *irq_default_host;
333 355
334 struct irq_host *irq_alloc_host(unsigned int revmap_type, 356 struct irq_host *irq_alloc_host(unsigned int revmap_type,
335 unsigned int revmap_arg, 357 unsigned int revmap_arg,
336 struct irq_host_ops *ops, 358 struct irq_host_ops *ops,
337 irq_hw_number_t inval_irq) 359 irq_hw_number_t inval_irq)
338 { 360 {
339 struct irq_host *host; 361 struct irq_host *host;
340 unsigned int size = sizeof(struct irq_host); 362 unsigned int size = sizeof(struct irq_host);
341 unsigned int i; 363 unsigned int i;
342 unsigned int *rmap; 364 unsigned int *rmap;
343 unsigned long flags; 365 unsigned long flags;
344 366
345 /* Allocate structure and revmap table if using linear mapping */ 367 /* Allocate structure and revmap table if using linear mapping */
346 if (revmap_type == IRQ_HOST_MAP_LINEAR) 368 if (revmap_type == IRQ_HOST_MAP_LINEAR)
347 size += revmap_arg * sizeof(unsigned int); 369 size += revmap_arg * sizeof(unsigned int);
348 if (mem_init_done) 370 if (mem_init_done)
349 host = kzalloc(size, GFP_KERNEL); 371 host = kzalloc(size, GFP_KERNEL);
350 else { 372 else {
351 host = alloc_bootmem(size); 373 host = alloc_bootmem(size);
352 if (host) 374 if (host)
353 memset(host, 0, size); 375 memset(host, 0, size);
354 } 376 }
355 if (host == NULL) 377 if (host == NULL)
356 return NULL; 378 return NULL;
357 379
358 /* Fill structure */ 380 /* Fill structure */
359 host->revmap_type = revmap_type; 381 host->revmap_type = revmap_type;
360 host->inval_irq = inval_irq; 382 host->inval_irq = inval_irq;
361 host->ops = ops; 383 host->ops = ops;
362 384
363 spin_lock_irqsave(&irq_big_lock, flags); 385 spin_lock_irqsave(&irq_big_lock, flags);
364 386
365 /* If it's a legacy controller, check for duplicates and 387 /* If it's a legacy controller, check for duplicates and
366 * mark it as allocated (we use irq 0 host pointer for that) 388 * mark it as allocated (we use irq 0 host pointer for that)
367 */ 389 */
368 if (revmap_type == IRQ_HOST_MAP_LEGACY) { 390 if (revmap_type == IRQ_HOST_MAP_LEGACY) {
369 if (irq_map[0].host != NULL) { 391 if (irq_map[0].host != NULL) {
370 spin_unlock_irqrestore(&irq_big_lock, flags); 392 spin_unlock_irqrestore(&irq_big_lock, flags);
371 /* If we are early boot, we can't free the structure, 393 /* If we are early boot, we can't free the structure,
372 * too bad... 394 * too bad...
373 * this will be fixed once slab is made available early 395 * this will be fixed once slab is made available early
374 * instead of the current cruft 396 * instead of the current cruft
375 */ 397 */
376 if (mem_init_done) 398 if (mem_init_done)
377 kfree(host); 399 kfree(host);
378 return NULL; 400 return NULL;
379 } 401 }
380 irq_map[0].host = host; 402 irq_map[0].host = host;
381 } 403 }
382 404
383 list_add(&host->link, &irq_hosts); 405 list_add(&host->link, &irq_hosts);
384 spin_unlock_irqrestore(&irq_big_lock, flags); 406 spin_unlock_irqrestore(&irq_big_lock, flags);
385 407
386 /* Additional setups per revmap type */ 408 /* Additional setups per revmap type */
387 switch(revmap_type) { 409 switch(revmap_type) {
388 case IRQ_HOST_MAP_LEGACY: 410 case IRQ_HOST_MAP_LEGACY:
389 /* 0 is always the invalid number for legacy */ 411 /* 0 is always the invalid number for legacy */
390 host->inval_irq = 0; 412 host->inval_irq = 0;
391 /* setup us as the host for all legacy interrupts */ 413 /* setup us as the host for all legacy interrupts */
392 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 414 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
393 irq_map[i].hwirq = 0; 415 irq_map[i].hwirq = 0;
394 smp_wmb(); 416 smp_wmb();
395 irq_map[i].host = host; 417 irq_map[i].host = host;
396 smp_wmb(); 418 smp_wmb();
397 419
398 /* Clear norequest flags */ 420 /* Clear norequest flags */
399 get_irq_desc(i)->status &= ~IRQ_NOREQUEST; 421 get_irq_desc(i)->status &= ~IRQ_NOREQUEST;
400 422
401 /* Legacy flags are left to default at this point, 423 /* Legacy flags are left to default at this point,
402 * one can then use irq_create_mapping() to 424 * one can then use irq_create_mapping() to
403 * explicitly change them 425 * explicitly change them
404 */ 426 */
405 ops->map(host, i, i); 427 ops->map(host, i, i);
406 } 428 }
407 break; 429 break;
408 case IRQ_HOST_MAP_LINEAR: 430 case IRQ_HOST_MAP_LINEAR:
409 rmap = (unsigned int *)(host + 1); 431 rmap = (unsigned int *)(host + 1);
410 for (i = 0; i < revmap_arg; i++) 432 for (i = 0; i < revmap_arg; i++)
411 rmap[i] = IRQ_NONE; 433 rmap[i] = IRQ_NONE;
412 host->revmap_data.linear.size = revmap_arg; 434 host->revmap_data.linear.size = revmap_arg;
413 smp_wmb(); 435 smp_wmb();
414 host->revmap_data.linear.revmap = rmap; 436 host->revmap_data.linear.revmap = rmap;
415 break; 437 break;
416 default: 438 default:
417 break; 439 break;
418 } 440 }
419 441
420 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); 442 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
421 443
422 return host; 444 return host;
423 } 445 }
424 446
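irq_alloc_host() above is how a PIC driver registers with the remapping layer. A condensed, hypothetical registration for a linear-mapped controller with 64 hardware sources; the ops signatures follow the calls made in this file (ops->map() and ops->xlate()), and everything named example_* is invented:

static struct irq_chip example_pic_chip;	/* assumed: mask/unmask/ack ops filled in elsewhere */
static struct irq_host *example_pic_host;

static int example_pic_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t hwirq)
{
	set_irq_chip_and_handler(virq, &example_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops example_pic_ops = {
	.map = example_pic_map,
};

static void __init example_pic_init(void)
{
	example_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64,
					  &example_pic_ops, 0 /* inval_irq */);
	BUG_ON(example_pic_host == NULL);
}

With no .xlate callback, irq_create_of_mapping() below simply takes intspec[0] as the hardware line, which is enough for simple one-cell interrupt specifiers.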
425 struct irq_host *irq_find_host(struct device_node *node) 447 struct irq_host *irq_find_host(struct device_node *node)
426 { 448 {
427 struct irq_host *h, *found = NULL; 449 struct irq_host *h, *found = NULL;
428 unsigned long flags; 450 unsigned long flags;
429 451
430 /* We might want to match the legacy controller last since 452 /* We might want to match the legacy controller last since
431 * it might potentially be set to match all interrupts in 453 * it might potentially be set to match all interrupts in
432 * the absence of a device node. This isn't a problem so far 454 * the absence of a device node. This isn't a problem so far
433 * yet though... 455 * yet though...
434 */ 456 */
435 spin_lock_irqsave(&irq_big_lock, flags); 457 spin_lock_irqsave(&irq_big_lock, flags);
436 list_for_each_entry(h, &irq_hosts, link) 458 list_for_each_entry(h, &irq_hosts, link)
437 if (h->ops->match == NULL || h->ops->match(h, node)) { 459 if (h->ops->match == NULL || h->ops->match(h, node)) {
438 found = h; 460 found = h;
439 break; 461 break;
440 } 462 }
441 spin_unlock_irqrestore(&irq_big_lock, flags); 463 spin_unlock_irqrestore(&irq_big_lock, flags);
442 return found; 464 return found;
443 } 465 }
444 EXPORT_SYMBOL_GPL(irq_find_host); 466 EXPORT_SYMBOL_GPL(irq_find_host);
445 467
446 void irq_set_default_host(struct irq_host *host) 468 void irq_set_default_host(struct irq_host *host)
447 { 469 {
448 pr_debug("irq: Default host set to @0x%p\n", host); 470 pr_debug("irq: Default host set to @0x%p\n", host);
449 471
450 irq_default_host = host; 472 irq_default_host = host;
451 } 473 }
452 474
453 void irq_set_virq_count(unsigned int count) 475 void irq_set_virq_count(unsigned int count)
454 { 476 {
455 pr_debug("irq: Trying to set virq count to %d\n", count); 477 pr_debug("irq: Trying to set virq count to %d\n", count);
456 478
457 BUG_ON(count < NUM_ISA_INTERRUPTS); 479 BUG_ON(count < NUM_ISA_INTERRUPTS);
458 if (count < NR_IRQS) 480 if (count < NR_IRQS)
459 irq_virq_count = count; 481 irq_virq_count = count;
460 } 482 }
461 483
462 /* radix tree not lockless safe ! we use a brlock-type mechanism 484 /* radix tree not lockless safe ! we use a brlock-type mechanism
463 * for now, until we can use a lockless radix tree 485 * for now, until we can use a lockless radix tree
464 */ 486 */
465 static void irq_radix_wrlock(unsigned long *flags) 487 static void irq_radix_wrlock(unsigned long *flags)
466 { 488 {
467 unsigned int cpu, ok; 489 unsigned int cpu, ok;
468 490
469 spin_lock_irqsave(&irq_big_lock, *flags); 491 spin_lock_irqsave(&irq_big_lock, *flags);
470 irq_radix_writer = 1; 492 irq_radix_writer = 1;
471 smp_mb(); 493 smp_mb();
472 do { 494 do {
473 barrier(); 495 barrier();
474 ok = 1; 496 ok = 1;
475 for_each_possible_cpu(cpu) { 497 for_each_possible_cpu(cpu) {
476 if (per_cpu(irq_radix_reader, cpu)) { 498 if (per_cpu(irq_radix_reader, cpu)) {
477 ok = 0; 499 ok = 0;
478 break; 500 break;
479 } 501 }
480 } 502 }
481 if (!ok) 503 if (!ok)
482 cpu_relax(); 504 cpu_relax();
483 } while(!ok); 505 } while(!ok);
484 } 506 }
485 507
486 static void irq_radix_wrunlock(unsigned long flags) 508 static void irq_radix_wrunlock(unsigned long flags)
487 { 509 {
488 smp_wmb(); 510 smp_wmb();
489 irq_radix_writer = 0; 511 irq_radix_writer = 0;
490 spin_unlock_irqrestore(&irq_big_lock, flags); 512 spin_unlock_irqrestore(&irq_big_lock, flags);
491 } 513 }
492 514
493 static void irq_radix_rdlock(unsigned long *flags) 515 static void irq_radix_rdlock(unsigned long *flags)
494 { 516 {
495 local_irq_save(*flags); 517 local_irq_save(*flags);
496 __get_cpu_var(irq_radix_reader) = 1; 518 __get_cpu_var(irq_radix_reader) = 1;
497 smp_mb(); 519 smp_mb();
498 if (likely(irq_radix_writer == 0)) 520 if (likely(irq_radix_writer == 0))
499 return; 521 return;
500 __get_cpu_var(irq_radix_reader) = 0; 522 __get_cpu_var(irq_radix_reader) = 0;
501 smp_wmb(); 523 smp_wmb();
502 spin_lock(&irq_big_lock); 524 spin_lock(&irq_big_lock);
503 __get_cpu_var(irq_radix_reader) = 1; 525 __get_cpu_var(irq_radix_reader) = 1;
504 spin_unlock(&irq_big_lock); 526 spin_unlock(&irq_big_lock);
505 } 527 }
506 528
507 static void irq_radix_rdunlock(unsigned long flags) 529 static void irq_radix_rdunlock(unsigned long flags)
508 { 530 {
509 __get_cpu_var(irq_radix_reader) = 0; 531 __get_cpu_var(irq_radix_reader) = 0;
510 local_irq_restore(flags); 532 local_irq_restore(flags);
511 } 533 }
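The four helpers above are a brlock-style construct: readers only set and clear their per-CPU irq_radix_reader flag (no shared cacheline is written in the common case), while a writer takes irq_big_lock, raises irq_radix_writer and spins until every reader flag has dropped. They are used purely as brackets, as irq_radix_revmap() below does; condensed (tree, hwirq, virq and flags are placeholders here):

	/* reader side: cheap, per-CPU flag only in the fast path */
	irq_radix_rdlock(&flags);
	ptr = radix_tree_lookup(tree, hwirq);
	irq_radix_rdunlock(flags);

	/* writer side: excludes all readers while the tree is modified */
	irq_radix_wrlock(&flags);
	radix_tree_insert(tree, hwirq, &irq_map[virq]);
	irq_radix_wrunlock(flags);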
512 534
513 535
514 unsigned int irq_create_mapping(struct irq_host *host, 536 unsigned int irq_create_mapping(struct irq_host *host,
515 irq_hw_number_t hwirq) 537 irq_hw_number_t hwirq)
516 { 538 {
517 unsigned int virq, hint; 539 unsigned int virq, hint;
518 540
519 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); 541 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
520 542
521 /* Look for default host if necessary */ 543 /* Look for default host if necessary */
522 if (host == NULL) 544 if (host == NULL)
523 host = irq_default_host; 545 host = irq_default_host;
524 if (host == NULL) { 546 if (host == NULL) {
525 printk(KERN_WARNING "irq_create_mapping called for" 547 printk(KERN_WARNING "irq_create_mapping called for"
526 " NULL host, hwirq=%lx\n", hwirq); 548 " NULL host, hwirq=%lx\n", hwirq);
527 WARN_ON(1); 549 WARN_ON(1);
528 return NO_IRQ; 550 return NO_IRQ;
529 } 551 }
530 pr_debug("irq: -> using host @%p\n", host); 552 pr_debug("irq: -> using host @%p\n", host);
531 553
532 /* Check if mapping already exists; if it does, call 554 /* Check if mapping already exists; if it does, call
533 * host->ops->map() to update the flags 555 * host->ops->map() to update the flags
534 */ 556 */
535 virq = irq_find_mapping(host, hwirq); 557 virq = irq_find_mapping(host, hwirq);
536 if (virq != IRQ_NONE) { 558 if (virq != IRQ_NONE) {
537 pr_debug("irq: -> existing mapping on virq %d\n", virq); 559 pr_debug("irq: -> existing mapping on virq %d\n", virq);
538 return virq; 560 return virq;
539 } 561 }
540 562
541 /* Get a virtual interrupt number */ 563 /* Get a virtual interrupt number */
542 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { 564 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
543 /* Handle legacy */ 565 /* Handle legacy */
544 virq = (unsigned int)hwirq; 566 virq = (unsigned int)hwirq;
545 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) 567 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
546 return NO_IRQ; 568 return NO_IRQ;
547 return virq; 569 return virq;
548 } else { 570 } else {
549 /* Allocate a virtual interrupt number */ 571 /* Allocate a virtual interrupt number */
550 hint = hwirq % irq_virq_count; 572 hint = hwirq % irq_virq_count;
551 virq = irq_alloc_virt(host, 1, hint); 573 virq = irq_alloc_virt(host, 1, hint);
552 if (virq == NO_IRQ) { 574 if (virq == NO_IRQ) {
553 pr_debug("irq: -> virq allocation failed\n"); 575 pr_debug("irq: -> virq allocation failed\n");
554 return NO_IRQ; 576 return NO_IRQ;
555 } 577 }
556 } 578 }
557 pr_debug("irq: -> obtained virq %d\n", virq); 579 pr_debug("irq: -> obtained virq %d\n", virq);
558 580
559 /* Clear IRQ_NOREQUEST flag */ 581 /* Clear IRQ_NOREQUEST flag */
560 get_irq_desc(virq)->status &= ~IRQ_NOREQUEST; 582 get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
561 583
562 /* map it */ 584 /* map it */
563 smp_wmb(); 585 smp_wmb();
564 irq_map[virq].hwirq = hwirq; 586 irq_map[virq].hwirq = hwirq;
565 smp_mb(); 587 smp_mb();
566 if (host->ops->map(host, virq, hwirq)) { 588 if (host->ops->map(host, virq, hwirq)) {
567 pr_debug("irq: -> mapping failed, freeing\n"); 589 pr_debug("irq: -> mapping failed, freeing\n");
568 irq_free_virt(virq, 1); 590 irq_free_virt(virq, 1);
569 return NO_IRQ; 591 return NO_IRQ;
570 } 592 }
571 return virq; 593 return virq;
572 } 594 }
573 EXPORT_SYMBOL_GPL(irq_create_mapping); 595 EXPORT_SYMBOL_GPL(irq_create_mapping);
574 596
575 unsigned int irq_create_of_mapping(struct device_node *controller, 597 unsigned int irq_create_of_mapping(struct device_node *controller,
576 u32 *intspec, unsigned int intsize) 598 u32 *intspec, unsigned int intsize)
577 { 599 {
578 struct irq_host *host; 600 struct irq_host *host;
579 irq_hw_number_t hwirq; 601 irq_hw_number_t hwirq;
580 unsigned int type = IRQ_TYPE_NONE; 602 unsigned int type = IRQ_TYPE_NONE;
581 unsigned int virq; 603 unsigned int virq;
582 604
583 if (controller == NULL) 605 if (controller == NULL)
584 host = irq_default_host; 606 host = irq_default_host;
585 else 607 else
586 host = irq_find_host(controller); 608 host = irq_find_host(controller);
587 if (host == NULL) { 609 if (host == NULL) {
588 printk(KERN_WARNING "irq: no irq host found for %s !\n", 610 printk(KERN_WARNING "irq: no irq host found for %s !\n",
589 controller->full_name); 611 controller->full_name);
590 return NO_IRQ; 612 return NO_IRQ;
591 } 613 }
592 614
593 /* If host has no translation, then we assume interrupt line */ 615 /* If host has no translation, then we assume interrupt line */
594 if (host->ops->xlate == NULL) 616 if (host->ops->xlate == NULL)
595 hwirq = intspec[0]; 617 hwirq = intspec[0];
596 else { 618 else {
597 if (host->ops->xlate(host, controller, intspec, intsize, 619 if (host->ops->xlate(host, controller, intspec, intsize,
598 &hwirq, &type)) 620 &hwirq, &type))
599 return NO_IRQ; 621 return NO_IRQ;
600 } 622 }
601 623
602 /* Create mapping */ 624 /* Create mapping */
603 virq = irq_create_mapping(host, hwirq); 625 virq = irq_create_mapping(host, hwirq);
604 if (virq == NO_IRQ) 626 if (virq == NO_IRQ)
605 return virq; 627 return virq;
606 628
607 /* Set type if specified and different than the current one */ 629 /* Set type if specified and different than the current one */
608 if (type != IRQ_TYPE_NONE && 630 if (type != IRQ_TYPE_NONE &&
609 type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK)) 631 type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
610 set_irq_type(virq, type); 632 set_irq_type(virq, type);
611 return virq; 633 return virq;
612 } 634 }
613 EXPORT_SYMBOL_GPL(irq_create_of_mapping); 635 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
614 636
615 unsigned int irq_of_parse_and_map(struct device_node *dev, int index) 637 unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
616 { 638 {
617 struct of_irq oirq; 639 struct of_irq oirq;
618 640
619 if (of_irq_map_one(dev, index, &oirq)) 641 if (of_irq_map_one(dev, index, &oirq))
620 return NO_IRQ; 642 return NO_IRQ;
621 643
622 return irq_create_of_mapping(oirq.controller, oirq.specifier, 644 return irq_create_of_mapping(oirq.controller, oirq.specifier,
623 oirq.size); 645 oirq.size);
624 } 646 }
625 EXPORT_SYMBOL_GPL(irq_of_parse_and_map); 647 EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
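irq_of_parse_and_map() above is the call most drivers use: it resolves interrupt index N of a device node and returns a virtual irq that can go straight into request_irq(). A hedged usage sketch (np, example_isr and dev are placeholders for a driver's own node, handler and cookie):

	unsigned int virq;

	virq = irq_of_parse_and_map(np, 0);	/* first interrupt of this node */
	if (virq == NO_IRQ)
		return -EINVAL;

	if (request_irq(virq, example_isr, 0, "example", dev))
		return -EBUSY;

On teardown the driver would free_irq() and then hand the mapping back with irq_dispose_mapping(virq), defined later in this file.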
626 648
627 void irq_dispose_mapping(unsigned int virq) 649 void irq_dispose_mapping(unsigned int virq)
628 { 650 {
629 struct irq_host *host = irq_map[virq].host; 651 struct irq_host *host = irq_map[virq].host;
630 irq_hw_number_t hwirq; 652 irq_hw_number_t hwirq;
631 unsigned long flags; 653 unsigned long flags;
632 654
633 WARN_ON (host == NULL); 655 WARN_ON (host == NULL);
634 if (host == NULL) 656 if (host == NULL)
635 return; 657 return;
636 658
637 /* Never unmap legacy interrupts */ 659 /* Never unmap legacy interrupts */
638 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 660 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
639 return; 661 return;
640 662
641 /* remove chip and handler */ 663 /* remove chip and handler */
642 set_irq_chip_and_handler(virq, NULL, NULL); 664 set_irq_chip_and_handler(virq, NULL, NULL);
643 665
644 /* Make sure it's completed */ 666 /* Make sure it's completed */
645 synchronize_irq(virq); 667 synchronize_irq(virq);
646 668
647 /* Tell the PIC about it */ 669 /* Tell the PIC about it */
648 if (host->ops->unmap) 670 if (host->ops->unmap)
649 host->ops->unmap(host, virq); 671 host->ops->unmap(host, virq);
650 smp_mb(); 672 smp_mb();
651 673
652 /* Clear reverse map */ 674 /* Clear reverse map */
653 hwirq = irq_map[virq].hwirq; 675 hwirq = irq_map[virq].hwirq;
654 switch(host->revmap_type) { 676 switch(host->revmap_type) {
655 case IRQ_HOST_MAP_LINEAR: 677 case IRQ_HOST_MAP_LINEAR:
656 if (hwirq < host->revmap_data.linear.size) 678 if (hwirq < host->revmap_data.linear.size)
657 host->revmap_data.linear.revmap[hwirq] = IRQ_NONE; 679 host->revmap_data.linear.revmap[hwirq] = IRQ_NONE;
658 break; 680 break;
659 case IRQ_HOST_MAP_TREE: 681 case IRQ_HOST_MAP_TREE:
660 /* Check if radix tree allocated yet */ 682 /* Check if radix tree allocated yet */
661 if (host->revmap_data.tree.gfp_mask == 0) 683 if (host->revmap_data.tree.gfp_mask == 0)
662 break; 684 break;
663 irq_radix_wrlock(&flags); 685 irq_radix_wrlock(&flags);
664 radix_tree_delete(&host->revmap_data.tree, hwirq); 686 radix_tree_delete(&host->revmap_data.tree, hwirq);
665 irq_radix_wrunlock(flags); 687 irq_radix_wrunlock(flags);
666 break; 688 break;
667 } 689 }
668 690
669 /* Destroy map */ 691 /* Destroy map */
670 smp_mb(); 692 smp_mb();
671 irq_map[virq].hwirq = host->inval_irq; 693 irq_map[virq].hwirq = host->inval_irq;
672 694
673 /* Set some flags */ 695 /* Set some flags */
674 get_irq_desc(virq)->status |= IRQ_NOREQUEST; 696 get_irq_desc(virq)->status |= IRQ_NOREQUEST;
675 697
676 /* Free it */ 698 /* Free it */
677 irq_free_virt(virq, 1); 699 irq_free_virt(virq, 1);
678 } 700 }
679 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 701 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
680 702
681 unsigned int irq_find_mapping(struct irq_host *host, 703 unsigned int irq_find_mapping(struct irq_host *host,
682 irq_hw_number_t hwirq) 704 irq_hw_number_t hwirq)
683 { 705 {
684 unsigned int i; 706 unsigned int i;
685 unsigned int hint = hwirq % irq_virq_count; 707 unsigned int hint = hwirq % irq_virq_count;
686 708
687 /* Look for default host if necessary */ 709 /* Look for default host if necessary */
688 if (host == NULL) 710 if (host == NULL)
689 host = irq_default_host; 711 host = irq_default_host;
690 if (host == NULL) 712 if (host == NULL)
691 return NO_IRQ; 713 return NO_IRQ;
692 714
693 /* legacy -> bail early */ 715 /* legacy -> bail early */
694 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 716 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
695 return hwirq; 717 return hwirq;
696 718
697 /* Slow path does a linear search of the map */ 719 /* Slow path does a linear search of the map */
698 if (hint < NUM_ISA_INTERRUPTS) 720 if (hint < NUM_ISA_INTERRUPTS)
699 hint = NUM_ISA_INTERRUPTS; 721 hint = NUM_ISA_INTERRUPTS;
700 i = hint; 722 i = hint;
701 do { 723 do {
702 if (irq_map[i].host == host && 724 if (irq_map[i].host == host &&
703 irq_map[i].hwirq == hwirq) 725 irq_map[i].hwirq == hwirq)
704 return i; 726 return i;
705 i++; 727 i++;
706 if (i >= irq_virq_count) 728 if (i >= irq_virq_count)
707 i = NUM_ISA_INTERRUPTS; 729 i = NUM_ISA_INTERRUPTS;
708 } while(i != hint); 730 } while(i != hint);
709 return NO_IRQ; 731 return NO_IRQ;
710 } 732 }
711 EXPORT_SYMBOL_GPL(irq_find_mapping); 733 EXPORT_SYMBOL_GPL(irq_find_mapping);
712 734
713 735
714 unsigned int irq_radix_revmap(struct irq_host *host, 736 unsigned int irq_radix_revmap(struct irq_host *host,
715 irq_hw_number_t hwirq) 737 irq_hw_number_t hwirq)
716 { 738 {
717 struct radix_tree_root *tree; 739 struct radix_tree_root *tree;
718 struct irq_map_entry *ptr; 740 struct irq_map_entry *ptr;
719 unsigned int virq; 741 unsigned int virq;
720 unsigned long flags; 742 unsigned long flags;
721 743
722 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 744 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
723 745
724 /* Check if the radix tree exists yet. We test the value of 746 /* Check if the radix tree exists yet. We test the value of
725 * the gfp_mask for that. Sneaky but saves another int in the 747 * the gfp_mask for that. Sneaky but saves another int in the
726 * structure. If not, we fall back to slow mode 748 * structure. If not, we fall back to slow mode
727 */ 749 */
728 tree = &host->revmap_data.tree; 750 tree = &host->revmap_data.tree;
729 if (tree->gfp_mask == 0) 751 if (tree->gfp_mask == 0)
730 return irq_find_mapping(host, hwirq); 752 return irq_find_mapping(host, hwirq);
731 753
732 /* Now try to resolve */ 754 /* Now try to resolve */
733 irq_radix_rdlock(&flags); 755 irq_radix_rdlock(&flags);
734 ptr = radix_tree_lookup(tree, hwirq); 756 ptr = radix_tree_lookup(tree, hwirq);
735 irq_radix_rdunlock(flags); 757 irq_radix_rdunlock(flags);
736 758
737 /* Found it, return */ 759 /* Found it, return */
738 if (ptr) { 760 if (ptr) {
739 virq = ptr - irq_map; 761 virq = ptr - irq_map;
740 return virq; 762 return virq;
741 } 763 }
742 764
743 /* If not there, try to insert it */ 765 /* If not there, try to insert it */
744 virq = irq_find_mapping(host, hwirq); 766 virq = irq_find_mapping(host, hwirq);
745 if (virq != NO_IRQ) { 767 if (virq != NO_IRQ) {
746 irq_radix_wrlock(&flags); 768 irq_radix_wrlock(&flags);
747 radix_tree_insert(tree, hwirq, &irq_map[virq]); 769 radix_tree_insert(tree, hwirq, &irq_map[virq]);
748 irq_radix_wrunlock(flags); 770 irq_radix_wrunlock(flags);
749 } 771 }
750 return virq; 772 return virq;
751 } 773 }
752 774
753 unsigned int irq_linear_revmap(struct irq_host *host, 775 unsigned int irq_linear_revmap(struct irq_host *host,
754 irq_hw_number_t hwirq) 776 irq_hw_number_t hwirq)
755 { 777 {
756 unsigned int *revmap; 778 unsigned int *revmap;
757 779
758 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); 780 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
759 781
760 /* Check revmap bounds */ 782 /* Check revmap bounds */
761 if (unlikely(hwirq >= host->revmap_data.linear.size)) 783 if (unlikely(hwirq >= host->revmap_data.linear.size))
762 return irq_find_mapping(host, hwirq); 784 return irq_find_mapping(host, hwirq);
763 785
764 /* Check if revmap was allocated */ 786 /* Check if revmap was allocated */
765 revmap = host->revmap_data.linear.revmap; 787 revmap = host->revmap_data.linear.revmap;
766 if (unlikely(revmap == NULL)) 788 if (unlikely(revmap == NULL))
767 return irq_find_mapping(host, hwirq); 789 return irq_find_mapping(host, hwirq);
768 790
769 /* Fill up revmap with slow path if no mapping found */ 791 /* Fill up revmap with slow path if no mapping found */
770 if (unlikely(revmap[hwirq] == NO_IRQ)) 792 if (unlikely(revmap[hwirq] == NO_IRQ))
771 revmap[hwirq] = irq_find_mapping(host, hwirq); 793 revmap[hwirq] = irq_find_mapping(host, hwirq);
772 794
773 return revmap[hwirq]; 795 return revmap[hwirq];
774 } 796 }
775 797
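irq_linear_revmap() above is the fast path intended for interrupt dispatch: a plain array lookup, filled lazily via irq_find_mapping() on first use. The classic consumer is a cascade handler for a secondary PIC; a hypothetical sketch (example_* names are invented, the rest are standard kernel interfaces):

/* Hypothetical chained handler for a secondary PIC behind one parent irq. */
static void example_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned int hwirq = example_pic_read_ack();	/* assumed register read */
	unsigned int virq = irq_linear_revmap(example_pic_host, hwirq);

	if (virq != NO_IRQ)
		generic_handle_irq(virq);

	desc->chip->eoi(irq);	/* assumed: the parent PIC expects an EOI here */
}

Such a handler would typically be attached to the parent interrupt with set_irq_chained_handler().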
776 unsigned int irq_alloc_virt(struct irq_host *host, 798 unsigned int irq_alloc_virt(struct irq_host *host,
777 unsigned int count, 799 unsigned int count,
778 unsigned int hint) 800 unsigned int hint)
779 { 801 {
780 unsigned long flags; 802 unsigned long flags;
781 unsigned int i, j, found = NO_IRQ; 803 unsigned int i, j, found = NO_IRQ;
782 804
783 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) 805 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
784 return NO_IRQ; 806 return NO_IRQ;
785 807
786 spin_lock_irqsave(&irq_big_lock, flags); 808 spin_lock_irqsave(&irq_big_lock, flags);
787 809
788 /* Use hint for 1 interrupt if any */ 810 /* Use hint for 1 interrupt if any */
789 if (count == 1 && hint >= NUM_ISA_INTERRUPTS && 811 if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
790 hint < irq_virq_count && irq_map[hint].host == NULL) { 812 hint < irq_virq_count && irq_map[hint].host == NULL) {
791 found = hint; 813 found = hint;
792 goto hint_found; 814 goto hint_found;
793 } 815 }
794 816
795 /* Look for count consecutive numbers in the allocatable 817 /* Look for count consecutive numbers in the allocatable
796 * (non-legacy) space 818 * (non-legacy) space
797 */ 819 */
798 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { 820 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
799 if (irq_map[i].host != NULL) 821 if (irq_map[i].host != NULL)
800 j = 0; 822 j = 0;
801 else 823 else
802 j++; 824 j++;
803 825
804 if (j == count) { 826 if (j == count) {
805 found = i - count + 1; 827 found = i - count + 1;
806 break; 828 break;
807 } 829 }
808 } 830 }
809 if (found == NO_IRQ) { 831 if (found == NO_IRQ) {
810 spin_unlock_irqrestore(&irq_big_lock, flags); 832 spin_unlock_irqrestore(&irq_big_lock, flags);
811 return NO_IRQ; 833 return NO_IRQ;
812 } 834 }
813 hint_found: 835 hint_found:
814 for (i = found; i < (found + count); i++) { 836 for (i = found; i < (found + count); i++) {
815 irq_map[i].hwirq = host->inval_irq; 837 irq_map[i].hwirq = host->inval_irq;
816 smp_wmb(); 838 smp_wmb();
817 irq_map[i].host = host; 839 irq_map[i].host = host;
818 } 840 }
819 spin_unlock_irqrestore(&irq_big_lock, flags); 841 spin_unlock_irqrestore(&irq_big_lock, flags);
820 return found; 842 return found;
821 } 843 }
822 844
823 void irq_free_virt(unsigned int virq, unsigned int count) 845 void irq_free_virt(unsigned int virq, unsigned int count)
824 { 846 {
825 unsigned long flags; 847 unsigned long flags;
826 unsigned int i; 848 unsigned int i;
827 849
828 WARN_ON (virq < NUM_ISA_INTERRUPTS); 850 WARN_ON (virq < NUM_ISA_INTERRUPTS);
829 WARN_ON (count == 0 || (virq + count) > irq_virq_count); 851 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
830 852
831 spin_lock_irqsave(&irq_big_lock, flags); 853 spin_lock_irqsave(&irq_big_lock, flags);
832 for (i = virq; i < (virq + count); i++) { 854 for (i = virq; i < (virq + count); i++) {
833 struct irq_host *host; 855 struct irq_host *host;
834 856
835 if (i < NUM_ISA_INTERRUPTS || 857 if (i < NUM_ISA_INTERRUPTS ||
836 (virq + count) > irq_virq_count) 858 (virq + count) > irq_virq_count)
837 continue; 859 continue;
838 860
839 host = irq_map[i].host; 861 host = irq_map[i].host;
840 irq_map[i].hwirq = host->inval_irq; 862 irq_map[i].hwirq = host->inval_irq;
841 smp_wmb(); 863 smp_wmb();
842 irq_map[i].host = NULL; 864 irq_map[i].host = NULL;
843 } 865 }
844 spin_unlock_irqrestore(&irq_big_lock, flags); 866 spin_unlock_irqrestore(&irq_big_lock, flags);
845 } 867 }
846 868
847 void irq_early_init(void) 869 void irq_early_init(void)
848 { 870 {
849 unsigned int i; 871 unsigned int i;
850 872
851 for (i = 0; i < NR_IRQS; i++) 873 for (i = 0; i < NR_IRQS; i++)
852 get_irq_desc(i)->status |= IRQ_NOREQUEST; 874 get_irq_desc(i)->status |= IRQ_NOREQUEST;
853 } 875 }
854 876
855 /* We need to create the radix trees late */ 877 /* We need to create the radix trees late */
856 static int irq_late_init(void) 878 static int irq_late_init(void)
857 { 879 {
858 struct irq_host *h; 880 struct irq_host *h;
859 unsigned long flags; 881 unsigned long flags;
860 882
861 irq_radix_wrlock(&flags); 883 irq_radix_wrlock(&flags);
862 list_for_each_entry(h, &irq_hosts, link) { 884 list_for_each_entry(h, &irq_hosts, link) {
863 if (h->revmap_type == IRQ_HOST_MAP_TREE) 885 if (h->revmap_type == IRQ_HOST_MAP_TREE)
864 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC); 886 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
865 } 887 }
866 irq_radix_wrunlock(flags); 888 irq_radix_wrunlock(flags);
867 889
868 return 0; 890 return 0;
869 } 891 }
870 arch_initcall(irq_late_init); 892 arch_initcall(irq_late_init);
871 893
872 #endif /* CONFIG_PPC_MERGE */ 894 #endif /* CONFIG_PPC_MERGE */
873 895
874 #ifdef CONFIG_PCI_MSI 896 #ifdef CONFIG_PCI_MSI
875 int pci_enable_msi(struct pci_dev * pdev) 897 int pci_enable_msi(struct pci_dev * pdev)
876 { 898 {
877 if (ppc_md.enable_msi) 899 if (ppc_md.enable_msi)
878 return ppc_md.enable_msi(pdev); 900 return ppc_md.enable_msi(pdev);
879 else 901 else
880 return -1; 902 return -1;
881 } 903 }
882 EXPORT_SYMBOL(pci_enable_msi); 904 EXPORT_SYMBOL(pci_enable_msi);
883 905
884 void pci_disable_msi(struct pci_dev * pdev) 906 void pci_disable_msi(struct pci_dev * pdev)
885 { 907 {
886 if (ppc_md.disable_msi) 908 if (ppc_md.disable_msi)
887 ppc_md.disable_msi(pdev); 909 ppc_md.disable_msi(pdev);
888 } 910 }
889 EXPORT_SYMBOL(pci_disable_msi); 911 EXPORT_SYMBOL(pci_disable_msi);
890 912
891 void pci_scan_msi_device(struct pci_dev *dev) {} 913 void pci_scan_msi_device(struct pci_dev *dev) {}
892 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;} 914 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
893 void pci_disable_msix(struct pci_dev *dev) {} 915 void pci_disable_msix(struct pci_dev *dev) {}
894 void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} 916 void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
895 void disable_msi_mode(struct pci_dev *dev, int pos, int type) {} 917 void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
896 void pci_no_msi(void) {} 918 void pci_no_msi(void) {}
897 EXPORT_SYMBOL(pci_enable_msix); 919 EXPORT_SYMBOL(pci_enable_msix);
898 EXPORT_SYMBOL(pci_disable_msix); 920 EXPORT_SYMBOL(pci_disable_msix);
899 921
900 #endif 922 #endif
901 923
902 #ifdef CONFIG_PPC64 924 #ifdef CONFIG_PPC64
903 static int __init setup_noirqdistrib(char *str) 925 static int __init setup_noirqdistrib(char *str)
904 { 926 {
905 distribute_irqs = 0; 927 distribute_irqs = 0;
906 return 1; 928 return 1;
907 } 929 }
908 930
909 __setup("noirqdistrib", setup_noirqdistrib); 931 __setup("noirqdistrib", setup_noirqdistrib);
910 #endif /* CONFIG_PPC64 */ 932 #endif /* CONFIG_PPC64 */
911 933
arch/powerpc/kernel/ppc_ksyms.c
1 #include <linux/module.h> 1 #include <linux/module.h>
2 #include <linux/threads.h> 2 #include <linux/threads.h>
3 #include <linux/smp.h> 3 #include <linux/smp.h>
4 #include <linux/sched.h> 4 #include <linux/sched.h>
5 #include <linux/elfcore.h> 5 #include <linux/elfcore.h>
6 #include <linux/string.h> 6 #include <linux/string.h>
7 #include <linux/interrupt.h> 7 #include <linux/interrupt.h>
8 #include <linux/screen_info.h> 8 #include <linux/screen_info.h>
9 #include <linux/vt_kern.h> 9 #include <linux/vt_kern.h>
10 #include <linux/nvram.h> 10 #include <linux/nvram.h>
11 #include <linux/console.h> 11 #include <linux/console.h>
12 #include <linux/irq.h> 12 #include <linux/irq.h>
13 #include <linux/pci.h> 13 #include <linux/pci.h>
14 #include <linux/delay.h> 14 #include <linux/delay.h>
15 #include <linux/ide.h> 15 #include <linux/ide.h>
16 #include <linux/bitops.h> 16 #include <linux/bitops.h>
17 17
18 #include <asm/page.h> 18 #include <asm/page.h>
19 #include <asm/semaphore.h> 19 #include <asm/semaphore.h>
20 #include <asm/processor.h> 20 #include <asm/processor.h>
21 #include <asm/uaccess.h> 21 #include <asm/uaccess.h>
22 #include <asm/io.h> 22 #include <asm/io.h>
23 #include <asm/ide.h> 23 #include <asm/ide.h>
24 #include <asm/atomic.h> 24 #include <asm/atomic.h>
25 #include <asm/checksum.h> 25 #include <asm/checksum.h>
26 #include <asm/pgtable.h> 26 #include <asm/pgtable.h>
27 #include <asm/tlbflush.h> 27 #include <asm/tlbflush.h>
28 #include <linux/adb.h> 28 #include <linux/adb.h>
29 #include <linux/cuda.h> 29 #include <linux/cuda.h>
30 #include <linux/pmu.h> 30 #include <linux/pmu.h>
31 #include <asm/prom.h> 31 #include <asm/prom.h>
32 #include <asm/system.h> 32 #include <asm/system.h>
33 #include <asm/pci-bridge.h> 33 #include <asm/pci-bridge.h>
34 #include <asm/irq.h> 34 #include <asm/irq.h>
35 #include <asm/pmac_feature.h> 35 #include <asm/pmac_feature.h>
36 #include <asm/dma.h> 36 #include <asm/dma.h>
37 #include <asm/machdep.h> 37 #include <asm/machdep.h>
38 #include <asm/hw_irq.h> 38 #include <asm/hw_irq.h>
39 #include <asm/nvram.h> 39 #include <asm/nvram.h>
40 #include <asm/mmu_context.h> 40 #include <asm/mmu_context.h>
41 #include <asm/backlight.h> 41 #include <asm/backlight.h>
42 #include <asm/time.h> 42 #include <asm/time.h>
43 #include <asm/cputable.h> 43 #include <asm/cputable.h>
44 #include <asm/btext.h> 44 #include <asm/btext.h>
45 #include <asm/div64.h> 45 #include <asm/div64.h>
46 #include <asm/signal.h> 46 #include <asm/signal.h>
47 47
48 #ifdef CONFIG_8xx 48 #ifdef CONFIG_8xx
49 #include <asm/commproc.h> 49 #include <asm/commproc.h>
50 #endif 50 #endif
51 51
52 #ifdef CONFIG_PPC64
53 EXPORT_SYMBOL(local_irq_restore);
54 #endif
55
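The export added above is 64-bit only, which implies local_irq_restore() is now an out-of-line function there rather than a simple MSR write. A minimal sketch of what a soft-enable style restore can look like; the paca fields soft_enabled and hard_enabled and the hard_irq_enable() helper are assumptions, not taken from this hunk:

/* Sketch only: re-enable interrupts recorded as disabled by local_irq_save(). */
void local_irq_restore(unsigned long en)
{
	get_paca()->soft_enabled = en;	/* record the new soft state */
	if (!en)
		return;			/* still disabled, nothing more to do */

	if (get_paca()->hard_enabled)
		return;			/* EE was never cleared, done */

	/* An interrupt arrived while we were soft-disabled and cleared EE;
	 * turn external interrupts back on in the MSR now that they are allowed.
	 */
	get_paca()->hard_enabled = en;
	hard_irq_enable();		/* assumed helper that sets MSR_EE */
}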
52 #ifdef CONFIG_PPC32 56 #ifdef CONFIG_PPC32
53 extern void transfer_to_handler(void); 57 extern void transfer_to_handler(void);
54 extern void do_IRQ(struct pt_regs *regs); 58 extern void do_IRQ(struct pt_regs *regs);
55 extern void machine_check_exception(struct pt_regs *regs); 59 extern void machine_check_exception(struct pt_regs *regs);
56 extern void alignment_exception(struct pt_regs *regs); 60 extern void alignment_exception(struct pt_regs *regs);
57 extern void program_check_exception(struct pt_regs *regs); 61 extern void program_check_exception(struct pt_regs *regs);
58 extern void single_step_exception(struct pt_regs *regs); 62 extern void single_step_exception(struct pt_regs *regs);
59 extern int sys_sigreturn(struct pt_regs *regs); 63 extern int sys_sigreturn(struct pt_regs *regs);
60 64
61 EXPORT_SYMBOL(clear_pages); 65 EXPORT_SYMBOL(clear_pages);
62 EXPORT_SYMBOL(ISA_DMA_THRESHOLD); 66 EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
63 EXPORT_SYMBOL(DMA_MODE_READ); 67 EXPORT_SYMBOL(DMA_MODE_READ);
64 EXPORT_SYMBOL(DMA_MODE_WRITE); 68 EXPORT_SYMBOL(DMA_MODE_WRITE);
65 EXPORT_SYMBOL(__div64_32); 69 EXPORT_SYMBOL(__div64_32);
66 70
67 EXPORT_SYMBOL(do_signal); 71 EXPORT_SYMBOL(do_signal);
68 EXPORT_SYMBOL(transfer_to_handler); 72 EXPORT_SYMBOL(transfer_to_handler);
69 EXPORT_SYMBOL(do_IRQ); 73 EXPORT_SYMBOL(do_IRQ);
70 EXPORT_SYMBOL(machine_check_exception); 74 EXPORT_SYMBOL(machine_check_exception);
71 EXPORT_SYMBOL(alignment_exception); 75 EXPORT_SYMBOL(alignment_exception);
72 EXPORT_SYMBOL(program_check_exception); 76 EXPORT_SYMBOL(program_check_exception);
73 EXPORT_SYMBOL(single_step_exception); 77 EXPORT_SYMBOL(single_step_exception);
74 EXPORT_SYMBOL(sys_sigreturn); 78 EXPORT_SYMBOL(sys_sigreturn);
75 #endif 79 #endif
76 80
77 EXPORT_SYMBOL(strcpy); 81 EXPORT_SYMBOL(strcpy);
78 EXPORT_SYMBOL(strncpy); 82 EXPORT_SYMBOL(strncpy);
79 EXPORT_SYMBOL(strcat); 83 EXPORT_SYMBOL(strcat);
80 EXPORT_SYMBOL(strlen); 84 EXPORT_SYMBOL(strlen);
81 EXPORT_SYMBOL(strcmp); 85 EXPORT_SYMBOL(strcmp);
82 EXPORT_SYMBOL(strcasecmp); 86 EXPORT_SYMBOL(strcasecmp);
83 EXPORT_SYMBOL(strncasecmp); 87 EXPORT_SYMBOL(strncasecmp);
84 88
85 EXPORT_SYMBOL(csum_partial); 89 EXPORT_SYMBOL(csum_partial);
86 EXPORT_SYMBOL(csum_partial_copy_generic); 90 EXPORT_SYMBOL(csum_partial_copy_generic);
87 EXPORT_SYMBOL(ip_fast_csum); 91 EXPORT_SYMBOL(ip_fast_csum);
88 EXPORT_SYMBOL(csum_tcpudp_magic); 92 EXPORT_SYMBOL(csum_tcpudp_magic);
89 93
90 EXPORT_SYMBOL(__copy_tofrom_user); 94 EXPORT_SYMBOL(__copy_tofrom_user);
91 EXPORT_SYMBOL(__clear_user); 95 EXPORT_SYMBOL(__clear_user);
92 EXPORT_SYMBOL(__strncpy_from_user); 96 EXPORT_SYMBOL(__strncpy_from_user);
93 EXPORT_SYMBOL(__strnlen_user); 97 EXPORT_SYMBOL(__strnlen_user);
94 #ifdef CONFIG_PPC64 98 #ifdef CONFIG_PPC64
95 EXPORT_SYMBOL(copy_4K_page); 99 EXPORT_SYMBOL(copy_4K_page);
96 #endif 100 #endif
97 101
98 #if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)) 102 #if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
99 EXPORT_SYMBOL(ppc_ide_md); 103 EXPORT_SYMBOL(ppc_ide_md);
100 #endif 104 #endif
101 105
102 #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) 106 #if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
103 EXPORT_SYMBOL(isa_io_base); 107 EXPORT_SYMBOL(isa_io_base);
104 EXPORT_SYMBOL(isa_mem_base); 108 EXPORT_SYMBOL(isa_mem_base);
105 EXPORT_SYMBOL(pci_dram_offset); 109 EXPORT_SYMBOL(pci_dram_offset);
106 EXPORT_SYMBOL(pci_alloc_consistent); 110 EXPORT_SYMBOL(pci_alloc_consistent);
107 EXPORT_SYMBOL(pci_free_consistent); 111 EXPORT_SYMBOL(pci_free_consistent);
108 EXPORT_SYMBOL(pci_bus_io_base); 112 EXPORT_SYMBOL(pci_bus_io_base);
109 EXPORT_SYMBOL(pci_bus_io_base_phys); 113 EXPORT_SYMBOL(pci_bus_io_base_phys);
110 EXPORT_SYMBOL(pci_bus_mem_base_phys); 114 EXPORT_SYMBOL(pci_bus_mem_base_phys);
111 EXPORT_SYMBOL(pci_bus_to_hose); 115 EXPORT_SYMBOL(pci_bus_to_hose);
112 #endif /* CONFIG_PCI */ 116 #endif /* CONFIG_PCI */
113 117
114 EXPORT_SYMBOL(start_thread); 118 EXPORT_SYMBOL(start_thread);
115 EXPORT_SYMBOL(kernel_thread); 119 EXPORT_SYMBOL(kernel_thread);
116 120
117 EXPORT_SYMBOL(giveup_fpu); 121 EXPORT_SYMBOL(giveup_fpu);
118 #ifdef CONFIG_ALTIVEC 122 #ifdef CONFIG_ALTIVEC
119 EXPORT_SYMBOL(giveup_altivec); 123 EXPORT_SYMBOL(giveup_altivec);
120 #endif /* CONFIG_ALTIVEC */ 124 #endif /* CONFIG_ALTIVEC */
121 #ifdef CONFIG_SPE 125 #ifdef CONFIG_SPE
122 EXPORT_SYMBOL(giveup_spe); 126 EXPORT_SYMBOL(giveup_spe);
123 #endif /* CONFIG_SPE */ 127 #endif /* CONFIG_SPE */
124 128
125 #ifndef CONFIG_PPC64 129 #ifndef CONFIG_PPC64
126 EXPORT_SYMBOL(flush_instruction_cache); 130 EXPORT_SYMBOL(flush_instruction_cache);
127 EXPORT_SYMBOL(flush_tlb_kernel_range); 131 EXPORT_SYMBOL(flush_tlb_kernel_range);
128 EXPORT_SYMBOL(flush_tlb_page); 132 EXPORT_SYMBOL(flush_tlb_page);
129 EXPORT_SYMBOL(_tlbie); 133 EXPORT_SYMBOL(_tlbie);
130 #endif 134 #endif
131 EXPORT_SYMBOL(__flush_icache_range); 135 EXPORT_SYMBOL(__flush_icache_range);
132 EXPORT_SYMBOL(flush_dcache_range); 136 EXPORT_SYMBOL(flush_dcache_range);
133 137
134 #ifdef CONFIG_SMP 138 #ifdef CONFIG_SMP
135 #ifdef CONFIG_PPC32 139 #ifdef CONFIG_PPC32
136 EXPORT_SYMBOL(smp_hw_index); 140 EXPORT_SYMBOL(smp_hw_index);
137 #endif 141 #endif
138 #endif 142 #endif
139 143
140 #ifdef CONFIG_ADB 144 #ifdef CONFIG_ADB
141 EXPORT_SYMBOL(adb_request); 145 EXPORT_SYMBOL(adb_request);
142 EXPORT_SYMBOL(adb_register); 146 EXPORT_SYMBOL(adb_register);
143 EXPORT_SYMBOL(adb_unregister); 147 EXPORT_SYMBOL(adb_unregister);
144 EXPORT_SYMBOL(adb_poll); 148 EXPORT_SYMBOL(adb_poll);
145 EXPORT_SYMBOL(adb_try_handler_change); 149 EXPORT_SYMBOL(adb_try_handler_change);
146 #endif /* CONFIG_ADB */ 150 #endif /* CONFIG_ADB */
147 #ifdef CONFIG_ADB_CUDA 151 #ifdef CONFIG_ADB_CUDA
148 EXPORT_SYMBOL(cuda_request); 152 EXPORT_SYMBOL(cuda_request);
149 EXPORT_SYMBOL(cuda_poll); 153 EXPORT_SYMBOL(cuda_poll);
150 #endif /* CONFIG_ADB_CUDA */ 154 #endif /* CONFIG_ADB_CUDA */
151 #ifdef CONFIG_VT 155 #ifdef CONFIG_VT
152 EXPORT_SYMBOL(kd_mksound); 156 EXPORT_SYMBOL(kd_mksound);
153 #endif 157 #endif
154 EXPORT_SYMBOL(to_tm); 158 EXPORT_SYMBOL(to_tm);
155 159
156 #ifdef CONFIG_PPC32 160 #ifdef CONFIG_PPC32
157 long long __ashrdi3(long long, int); 161 long long __ashrdi3(long long, int);
158 long long __ashldi3(long long, int); 162 long long __ashldi3(long long, int);
159 long long __lshrdi3(long long, int); 163 long long __lshrdi3(long long, int);
160 EXPORT_SYMBOL(__ashrdi3); 164 EXPORT_SYMBOL(__ashrdi3);
161 EXPORT_SYMBOL(__ashldi3); 165 EXPORT_SYMBOL(__ashldi3);
162 EXPORT_SYMBOL(__lshrdi3); 166 EXPORT_SYMBOL(__lshrdi3);
163 #endif 167 #endif
164 168
165 EXPORT_SYMBOL(memcpy); 169 EXPORT_SYMBOL(memcpy);
166 EXPORT_SYMBOL(memset); 170 EXPORT_SYMBOL(memset);
167 EXPORT_SYMBOL(memmove); 171 EXPORT_SYMBOL(memmove);
168 EXPORT_SYMBOL(memcmp); 172 EXPORT_SYMBOL(memcmp);
169 EXPORT_SYMBOL(memchr); 173 EXPORT_SYMBOL(memchr);
170 174
171 #if defined(CONFIG_FB_VGA16_MODULE) 175 #if defined(CONFIG_FB_VGA16_MODULE)
172 EXPORT_SYMBOL(screen_info); 176 EXPORT_SYMBOL(screen_info);
173 #endif 177 #endif
174 178
175 #ifdef CONFIG_PPC32 179 #ifdef CONFIG_PPC32
176 EXPORT_SYMBOL(timer_interrupt); 180 EXPORT_SYMBOL(timer_interrupt);
177 EXPORT_SYMBOL(irq_desc); 181 EXPORT_SYMBOL(irq_desc);
178 EXPORT_SYMBOL(tb_ticks_per_jiffy); 182 EXPORT_SYMBOL(tb_ticks_per_jiffy);
179 EXPORT_SYMBOL(console_drivers); 183 EXPORT_SYMBOL(console_drivers);
180 EXPORT_SYMBOL(cacheable_memcpy); 184 EXPORT_SYMBOL(cacheable_memcpy);
181 #endif 185 #endif
182 186
183 #ifdef CONFIG_8xx 187 #ifdef CONFIG_8xx
184 EXPORT_SYMBOL(cpm_install_handler); 188 EXPORT_SYMBOL(cpm_install_handler);
185 EXPORT_SYMBOL(cpm_free_handler); 189 EXPORT_SYMBOL(cpm_free_handler);
186 #endif /* CONFIG_8xx */ 190 #endif /* CONFIG_8xx */
187 #if defined(CONFIG_8xx) || defined(CONFIG_40x) 191 #if defined(CONFIG_8xx) || defined(CONFIG_40x)
188 EXPORT_SYMBOL(__res); 192 EXPORT_SYMBOL(__res);
189 #endif 193 #endif
190 194
191 #ifdef CONFIG_PPC32 195 #ifdef CONFIG_PPC32
192 EXPORT_SYMBOL(next_mmu_context); 196 EXPORT_SYMBOL(next_mmu_context);
193 EXPORT_SYMBOL(set_context); 197 EXPORT_SYMBOL(set_context);
194 #endif 198 #endif
195 199
196 #ifdef CONFIG_PPC_STD_MMU_32 200 #ifdef CONFIG_PPC_STD_MMU_32
197 extern long mol_trampoline; 201 extern long mol_trampoline;
198 EXPORT_SYMBOL(mol_trampoline); /* For MOL */ 202 EXPORT_SYMBOL(mol_trampoline); /* For MOL */
199 EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ 203 EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
200 #ifdef CONFIG_SMP 204 #ifdef CONFIG_SMP
201 extern int mmu_hash_lock; 205 extern int mmu_hash_lock;
202 EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ 206 EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
203 #endif /* CONFIG_SMP */ 207 #endif /* CONFIG_SMP */
204 extern long *intercept_table; 208 extern long *intercept_table;
205 EXPORT_SYMBOL(intercept_table); 209 EXPORT_SYMBOL(intercept_table);
206 #endif /* CONFIG_PPC_STD_MMU_32 */ 210 #endif /* CONFIG_PPC_STD_MMU_32 */
207 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 211 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
208 EXPORT_SYMBOL(__mtdcr); 212 EXPORT_SYMBOL(__mtdcr);
209 EXPORT_SYMBOL(__mfdcr); 213 EXPORT_SYMBOL(__mfdcr);
210 #endif 214 #endif
211 215
arch/powerpc/kernel/setup_64.c
1 /* 1 /*
2 * 2 *
3 * Common boot and setup code. 3 * Common boot and setup code.
4 * 4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp 5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13 #undef DEBUG 13 #undef DEBUG
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/string.h> 16 #include <linux/string.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/reboot.h> 20 #include <linux/reboot.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/initrd.h> 22 #include <linux/initrd.h>
23 #include <linux/ide.h> 23 #include <linux/ide.h>
24 #include <linux/seq_file.h> 24 #include <linux/seq_file.h>
25 #include <linux/ioport.h> 25 #include <linux/ioport.h>
26 #include <linux/console.h> 26 #include <linux/console.h>
27 #include <linux/utsname.h> 27 #include <linux/utsname.h>
28 #include <linux/tty.h> 28 #include <linux/tty.h>
29 #include <linux/root_dev.h> 29 #include <linux/root_dev.h>
30 #include <linux/notifier.h> 30 #include <linux/notifier.h>
31 #include <linux/cpu.h> 31 #include <linux/cpu.h>
32 #include <linux/unistd.h> 32 #include <linux/unistd.h>
33 #include <linux/serial.h> 33 #include <linux/serial.h>
34 #include <linux/serial_8250.h> 34 #include <linux/serial_8250.h>
35 #include <linux/bootmem.h> 35 #include <linux/bootmem.h>
36 #include <asm/io.h> 36 #include <asm/io.h>
37 #include <asm/kdump.h> 37 #include <asm/kdump.h>
38 #include <asm/prom.h> 38 #include <asm/prom.h>
39 #include <asm/processor.h> 39 #include <asm/processor.h>
40 #include <asm/pgtable.h> 40 #include <asm/pgtable.h>
41 #include <asm/smp.h> 41 #include <asm/smp.h>
42 #include <asm/elf.h> 42 #include <asm/elf.h>
43 #include <asm/machdep.h> 43 #include <asm/machdep.h>
44 #include <asm/paca.h> 44 #include <asm/paca.h>
45 #include <asm/time.h> 45 #include <asm/time.h>
46 #include <asm/cputable.h> 46 #include <asm/cputable.h>
47 #include <asm/sections.h> 47 #include <asm/sections.h>
48 #include <asm/btext.h> 48 #include <asm/btext.h>
49 #include <asm/nvram.h> 49 #include <asm/nvram.h>
50 #include <asm/setup.h> 50 #include <asm/setup.h>
51 #include <asm/system.h> 51 #include <asm/system.h>
52 #include <asm/rtas.h> 52 #include <asm/rtas.h>
53 #include <asm/iommu.h> 53 #include <asm/iommu.h>
54 #include <asm/serial.h> 54 #include <asm/serial.h>
55 #include <asm/cache.h> 55 #include <asm/cache.h>
56 #include <asm/page.h> 56 #include <asm/page.h>
57 #include <asm/mmu.h> 57 #include <asm/mmu.h>
58 #include <asm/lmb.h> 58 #include <asm/lmb.h>
59 #include <asm/firmware.h> 59 #include <asm/firmware.h>
60 #include <asm/xmon.h> 60 #include <asm/xmon.h>
61 #include <asm/udbg.h> 61 #include <asm/udbg.h>
62 #include <asm/kexec.h> 62 #include <asm/kexec.h>
63 63
64 #include "setup.h" 64 #include "setup.h"
65 65
66 #ifdef DEBUG 66 #ifdef DEBUG
67 #define DBG(fmt...) udbg_printf(fmt) 67 #define DBG(fmt...) udbg_printf(fmt)
68 #else 68 #else
69 #define DBG(fmt...) 69 #define DBG(fmt...)
70 #endif 70 #endif
71 71
72 int have_of = 1; 72 int have_of = 1;
73 int boot_cpuid = 0; 73 int boot_cpuid = 0;
74 dev_t boot_dev; 74 dev_t boot_dev;
75 u64 ppc64_pft_size; 75 u64 ppc64_pft_size;
76 76
77 /* Pick defaults since we might want to patch instructions 77 /* Pick defaults since we might want to patch instructions
78 * before we've read this from the device tree. 78 * before we've read this from the device tree.
79 */ 79 */
80 struct ppc64_caches ppc64_caches = { 80 struct ppc64_caches ppc64_caches = {
81 .dline_size = 0x40, 81 .dline_size = 0x40,
82 .log_dline_size = 6, 82 .log_dline_size = 6,
83 .iline_size = 0x40, 83 .iline_size = 0x40,
84 .log_iline_size = 6 84 .log_iline_size = 6
85 }; 85 };
86 EXPORT_SYMBOL_GPL(ppc64_caches); 86 EXPORT_SYMBOL_GPL(ppc64_caches);
87 87
88 /* 88 /*
89 * These are used in binfmt_elf.c to put aux entries on the stack 89 * These are used in binfmt_elf.c to put aux entries on the stack
90 * for each elf executable being started. 90 * for each elf executable being started.
91 */ 91 */
92 int dcache_bsize; 92 int dcache_bsize;
93 int icache_bsize; 93 int icache_bsize;
94 int ucache_bsize; 94 int ucache_bsize;
95 95
96 #ifdef CONFIG_SMP 96 #ifdef CONFIG_SMP
97 97
98 static int smt_enabled_cmdline; 98 static int smt_enabled_cmdline;
99 99
100 /* Look for ibm,smt-enabled OF option */ 100 /* Look for ibm,smt-enabled OF option */
101 static void check_smt_enabled(void) 101 static void check_smt_enabled(void)
102 { 102 {
103 struct device_node *dn; 103 struct device_node *dn;
104 const char *smt_option; 104 const char *smt_option;
105 105
106 /* Allow the command line to overrule the OF option */ 106 /* Allow the command line to overrule the OF option */
107 if (smt_enabled_cmdline) 107 if (smt_enabled_cmdline)
108 return; 108 return;
109 109
110 dn = of_find_node_by_path("/options"); 110 dn = of_find_node_by_path("/options");
111 111
112 if (dn) { 112 if (dn) {
113 smt_option = get_property(dn, "ibm,smt-enabled", NULL); 113 smt_option = get_property(dn, "ibm,smt-enabled", NULL);
114 114
115 if (smt_option) { 115 if (smt_option) {
116 if (!strcmp(smt_option, "on")) 116 if (!strcmp(smt_option, "on"))
117 smt_enabled_at_boot = 1; 117 smt_enabled_at_boot = 1;
118 else if (!strcmp(smt_option, "off")) 118 else if (!strcmp(smt_option, "off"))
119 smt_enabled_at_boot = 0; 119 smt_enabled_at_boot = 0;
120 } 120 }
121 } 121 }
122 } 122 }
123 123
124 /* Look for smt-enabled= cmdline option */ 124 /* Look for smt-enabled= cmdline option */
125 static int __init early_smt_enabled(char *p) 125 static int __init early_smt_enabled(char *p)
126 { 126 {
127 smt_enabled_cmdline = 1; 127 smt_enabled_cmdline = 1;
128 128
129 if (!p) 129 if (!p)
130 return 0; 130 return 0;
131 131
132 if (!strcmp(p, "on") || !strcmp(p, "1")) 132 if (!strcmp(p, "on") || !strcmp(p, "1"))
133 smt_enabled_at_boot = 1; 133 smt_enabled_at_boot = 1;
134 else if (!strcmp(p, "off") || !strcmp(p, "0")) 134 else if (!strcmp(p, "off") || !strcmp(p, "0"))
135 smt_enabled_at_boot = 0; 135 smt_enabled_at_boot = 0;
136 136
137 return 0; 137 return 0;
138 } 138 }
139 early_param("smt-enabled", early_smt_enabled); 139 early_param("smt-enabled", early_smt_enabled);
140 140
141 #else 141 #else
142 #define check_smt_enabled() 142 #define check_smt_enabled()
143 #endif /* CONFIG_SMP */ 143 #endif /* CONFIG_SMP */
144 144
145 /* Put the paca pointer into r13 and SPRG3 */ 145 /* Put the paca pointer into r13 and SPRG3 */
146 void __init setup_paca(int cpu) 146 void __init setup_paca(int cpu)
147 { 147 {
148 local_paca = &paca[cpu]; 148 local_paca = &paca[cpu];
149 mtspr(SPRN_SPRG3, local_paca); 149 mtspr(SPRN_SPRG3, local_paca);
150 } 150 }
151 151
152 /* 152 /*
153 * Early initialization entry point. This is called by head.S 153 * Early initialization entry point. This is called by head.S
154 * with MMU translation disabled. We rely on the "feature" of 154 * with MMU translation disabled. We rely on the "feature" of
155 * the CPU that ignores the top 2 bits of the address in real 155 * the CPU that ignores the top 2 bits of the address in real
156 * mode so we can access kernel globals normally provided we 156 * mode so we can access kernel globals normally provided we
157 * only toy with things in the RMO region. From here, we do 157 * only toy with things in the RMO region. From here, we do
158 * some early parsing of the device-tree to set up our LMB 158 * some early parsing of the device-tree to set up our LMB
159 * data structures, and allocate & initialize the hash table 159 * data structures, and allocate & initialize the hash table
160 * and segment tables so we can start running with translation 160 * and segment tables so we can start running with translation
161 * enabled. 161 * enabled.
162 * 162 *
163 * It is this function which will call the probe() callback of 163 * It is this function which will call the probe() callback of
164 * the various platform types and copy the matching one to the 164 * the various platform types and copy the matching one to the
165 * global ppc_md structure. Your platform can eventually do 165 * global ppc_md structure. Your platform can eventually do
166 * some very early initializations from the probe() routine, but 166 * some very early initializations from the probe() routine, but
167 * this is not recommended; be very careful as, for example, the 167 * this is not recommended; be very careful as, for example, the
168 * device-tree is not accessible via normal means at this point. 168 * device-tree is not accessible via normal means at this point.
169 */ 169 */
170 170
171 void __init early_setup(unsigned long dt_ptr) 171 void __init early_setup(unsigned long dt_ptr)
172 { 172 {
173 /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ 173 /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
174 setup_paca(0); 174 setup_paca(0);
175 175
176 /* Enable early debugging if any specified (see udbg.h) */ 176 /* Enable early debugging if any specified (see udbg.h) */
177 udbg_early_init(); 177 udbg_early_init();
178 178
179 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); 179 DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
180 180
181 /* 181 /*
182 * Do early initializations using the flattened device 182 * Do early initializations using the flattened device
183 * tree, like retrieving the physical memory map or 183 * tree, like retrieving the physical memory map or
184 * calculating/retrieving the hash table size 184 * calculating/retrieving the hash table size
185 */ 185 */
186 early_init_devtree(__va(dt_ptr)); 186 early_init_devtree(__va(dt_ptr));
187 187
188 /* Now we know the logical id of our boot cpu, setup the paca. */ 188 /* Now we know the logical id of our boot cpu, setup the paca. */
189 setup_paca(boot_cpuid); 189 setup_paca(boot_cpuid);
190 190
191 /* Fix up paca fields required for the boot cpu */ 191 /* Fix up paca fields required for the boot cpu */
192 get_paca()->cpu_start = 1; 192 get_paca()->cpu_start = 1;
193 get_paca()->stab_real = __pa((u64)&initial_stab); 193 get_paca()->stab_real = __pa((u64)&initial_stab);
194 get_paca()->stab_addr = (u64)&initial_stab; 194 get_paca()->stab_addr = (u64)&initial_stab;
195 195
196 /* Probe the machine type */ 196 /* Probe the machine type */
197 probe_machine(); 197 probe_machine();
198 198
199 setup_kdump_trampoline(); 199 setup_kdump_trampoline();
200 200
201 DBG("Found, Initializing memory management...\n"); 201 DBG("Found, Initializing memory management...\n");
202 202
203 /* 203 /*
204 * Initialize the MMU Hash table and create the linear mapping 204 * Initialize the MMU Hash table and create the linear mapping
205 * of memory. Has to be done before stab/slb initialization as 205 * of memory. Has to be done before stab/slb initialization as
206 * this is currently where the page size encoding is obtained 206 * this is currently where the page size encoding is obtained
207 */ 207 */
208 htab_initialize(); 208 htab_initialize();
209 209
210 /* 210 /*
211 * Initialize stab / SLB management except on iSeries 211 * Initialize stab / SLB management except on iSeries
212 */ 212 */
213 if (cpu_has_feature(CPU_FTR_SLB)) 213 if (cpu_has_feature(CPU_FTR_SLB))
214 slb_initialize(); 214 slb_initialize();
215 else if (!firmware_has_feature(FW_FEATURE_ISERIES)) 215 else if (!firmware_has_feature(FW_FEATURE_ISERIES))
216 stab_initialize(get_paca()->stab_real); 216 stab_initialize(get_paca()->stab_real);
217 217
218 DBG(" <- early_setup()\n"); 218 DBG(" <- early_setup()\n");
219 } 219 }
220 220
221 #ifdef CONFIG_SMP 221 #ifdef CONFIG_SMP
222 void early_setup_secondary(void) 222 void early_setup_secondary(void)
223 { 223 {
224 struct paca_struct *lpaca = get_paca(); 224 struct paca_struct *lpaca = get_paca();
225 225
226 /* Mark enabled in PACA */ 226 /* Mark interrupts disabled in PACA */
227 lpaca->proc_enabled = 0; 227 lpaca->soft_enabled = 0;
228 228
229 /* Initialize hash table for that CPU */ 229 /* Initialize hash table for that CPU */
230 htab_initialize_secondary(); 230 htab_initialize_secondary();
231 231
232 /* Initialize STAB/SLB. We use a virtual address as it works 232 /* Initialize STAB/SLB. We use a virtual address as it works
233 * in real mode on pSeries and we want a virtual address on 233 * in real mode on pSeries and we want a virtual address on
234 * iSeries anyway 234 * iSeries anyway
235 */ 235 */
236 if (cpu_has_feature(CPU_FTR_SLB)) 236 if (cpu_has_feature(CPU_FTR_SLB))
237 slb_initialize(); 237 slb_initialize();
238 else 238 else
239 stab_initialize(lpaca->stab_addr); 239 stab_initialize(lpaca->stab_addr);
240 } 240 }
241 241
242 #endif /* CONFIG_SMP */ 242 #endif /* CONFIG_SMP */
243 243
244 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 244 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
245 void smp_release_cpus(void) 245 void smp_release_cpus(void)
246 { 246 {
247 extern unsigned long __secondary_hold_spinloop; 247 extern unsigned long __secondary_hold_spinloop;
248 unsigned long *ptr; 248 unsigned long *ptr;
249 249
250 DBG(" -> smp_release_cpus()\n"); 250 DBG(" -> smp_release_cpus()\n");
251 251
252 /* All secondary cpus are spinning on a common spinloop, release them 252 /* All secondary cpus are spinning on a common spinloop, release them
253 * all now so they can start to spin on their individual paca 253 * all now so they can start to spin on their individual paca
254 * spinloops. For non-SMP kernels, the secondary cpus never get out 254 * spinloops. For non-SMP kernels, the secondary cpus never get out
255 * of the common spinloop. 255 * of the common spinloop.
256 * This is useless but harmless on iSeries, where secondaries are already 256 * This is useless but harmless on iSeries, where secondaries are already
257 * waiting on their paca spinloops. */ 257 * waiting on their paca spinloops. */
258 258
259 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop 259 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
260 - PHYSICAL_START); 260 - PHYSICAL_START);
261 *ptr = 1; 261 *ptr = 1;
262 mb(); 262 mb();
263 263
264 DBG(" <- smp_release_cpus()\n"); 264 DBG(" <- smp_release_cpus()\n");
265 } 265 }
266 #endif /* CONFIG_SMP || CONFIG_KEXEC */ 266 #endif /* CONFIG_SMP || CONFIG_KEXEC */
267 267
268 /* 268 /*
269 * Initialize some remaining members of the ppc64_caches and systemcfg 269 * Initialize some remaining members of the ppc64_caches and systemcfg
270 * structures 270 * structures
271 * (at least until we get rid of them completely). This is mostly some 271 * (at least until we get rid of them completely). This is mostly some
272 * cache information about the CPU that will be used by cache flush 272 * cache information about the CPU that will be used by cache flush
273 * routines and/or provided to userland 273 * routines and/or provided to userland
274 */ 274 */
275 static void __init initialize_cache_info(void) 275 static void __init initialize_cache_info(void)
276 { 276 {
277 struct device_node *np; 277 struct device_node *np;
278 unsigned long num_cpus = 0; 278 unsigned long num_cpus = 0;
279 279
280 DBG(" -> initialize_cache_info()\n"); 280 DBG(" -> initialize_cache_info()\n");
281 281
282 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) { 282 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
283 num_cpus += 1; 283 num_cpus += 1;
284 284
285 /* We're assuming *all* of the CPUs have the same 285 /* We're assuming *all* of the CPUs have the same
286 * d-cache and i-cache sizes... -Peter 286 * d-cache and i-cache sizes... -Peter
287 */ 287 */
288 288
289 if ( num_cpus == 1 ) { 289 if ( num_cpus == 1 ) {
290 const u32 *sizep, *lsizep; 290 const u32 *sizep, *lsizep;
291 u32 size, lsize; 291 u32 size, lsize;
292 const char *dc, *ic; 292 const char *dc, *ic;
293 293
294 /* Then read cache information */ 294 /* Then read cache information */
295 if (machine_is(powermac)) { 295 if (machine_is(powermac)) {
296 dc = "d-cache-block-size"; 296 dc = "d-cache-block-size";
297 ic = "i-cache-block-size"; 297 ic = "i-cache-block-size";
298 } else { 298 } else {
299 dc = "d-cache-line-size"; 299 dc = "d-cache-line-size";
300 ic = "i-cache-line-size"; 300 ic = "i-cache-line-size";
301 } 301 }
302 302
303 size = 0; 303 size = 0;
304 lsize = cur_cpu_spec->dcache_bsize; 304 lsize = cur_cpu_spec->dcache_bsize;
305 sizep = get_property(np, "d-cache-size", NULL); 305 sizep = get_property(np, "d-cache-size", NULL);
306 if (sizep != NULL) 306 if (sizep != NULL)
307 size = *sizep; 307 size = *sizep;
308 lsizep = get_property(np, dc, NULL); 308 lsizep = get_property(np, dc, NULL);
309 if (lsizep != NULL) 309 if (lsizep != NULL)
310 lsize = *lsizep; 310 lsize = *lsizep;
311 if (sizep == 0 || lsizep == 0) 311 if (sizep == 0 || lsizep == 0)
312 DBG("Argh, can't find dcache properties ! " 312 DBG("Argh, can't find dcache properties ! "
313 "sizep: %p, lsizep: %p\n", sizep, lsizep); 313 "sizep: %p, lsizep: %p\n", sizep, lsizep);
314 314
315 ppc64_caches.dsize = size; 315 ppc64_caches.dsize = size;
316 ppc64_caches.dline_size = lsize; 316 ppc64_caches.dline_size = lsize;
317 ppc64_caches.log_dline_size = __ilog2(lsize); 317 ppc64_caches.log_dline_size = __ilog2(lsize);
318 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize; 318 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
319 319
320 size = 0; 320 size = 0;
321 lsize = cur_cpu_spec->icache_bsize; 321 lsize = cur_cpu_spec->icache_bsize;
322 sizep = get_property(np, "i-cache-size", NULL); 322 sizep = get_property(np, "i-cache-size", NULL);
323 if (sizep != NULL) 323 if (sizep != NULL)
324 size = *sizep; 324 size = *sizep;
325 lsizep = get_property(np, ic, NULL); 325 lsizep = get_property(np, ic, NULL);
326 if (lsizep != NULL) 326 if (lsizep != NULL)
327 lsize = *lsizep; 327 lsize = *lsizep;
328 if (sizep == 0 || lsizep == 0) 328 if (sizep == 0 || lsizep == 0)
329 DBG("Argh, can't find icache properties ! " 329 DBG("Argh, can't find icache properties ! "
330 "sizep: %p, lsizep: %p\n", sizep, lsizep); 330 "sizep: %p, lsizep: %p\n", sizep, lsizep);
331 331
332 ppc64_caches.isize = size; 332 ppc64_caches.isize = size;
333 ppc64_caches.iline_size = lsize; 333 ppc64_caches.iline_size = lsize;
334 ppc64_caches.log_iline_size = __ilog2(lsize); 334 ppc64_caches.log_iline_size = __ilog2(lsize);
335 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; 335 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
336 } 336 }
337 } 337 }
338 338
339 DBG(" <- initialize_cache_info()\n"); 339 DBG(" <- initialize_cache_info()\n");
340 } 340 }
341 341
342 342
343 /* 343 /*
344 * Do some initial setup of the system. The parameters are those which 344 * Do some initial setup of the system. The parameters are those which
345 * were passed in from the bootloader. 345 * were passed in from the bootloader.
346 */ 346 */
347 void __init setup_system(void) 347 void __init setup_system(void)
348 { 348 {
349 DBG(" -> setup_system()\n"); 349 DBG(" -> setup_system()\n");
350 350
351 /* 351 /*
352 * Unflatten the device-tree passed by prom_init or kexec 352 * Unflatten the device-tree passed by prom_init or kexec
353 */ 353 */
354 unflatten_device_tree(); 354 unflatten_device_tree();
355 355
356 /* 356 /*
357 * Fill the ppc64_caches & systemcfg structures with information 357 * Fill the ppc64_caches & systemcfg structures with information
358 * retrieved from the device-tree. 358 * retrieved from the device-tree.
359 */ 359 */
360 initialize_cache_info(); 360 initialize_cache_info();
361 361
362 /* 362 /*
363 * Initialize irq remapping subsystem 363 * Initialize irq remapping subsystem
364 */ 364 */
365 irq_early_init(); 365 irq_early_init();
366 366
367 #ifdef CONFIG_PPC_RTAS 367 #ifdef CONFIG_PPC_RTAS
368 /* 368 /*
369 * Initialize RTAS if available 369 * Initialize RTAS if available
370 */ 370 */
371 rtas_initialize(); 371 rtas_initialize();
372 #endif /* CONFIG_PPC_RTAS */ 372 #endif /* CONFIG_PPC_RTAS */
373 373
374 /* 374 /*
375 * Check if we have an initrd provided via the device-tree 375 * Check if we have an initrd provided via the device-tree
376 */ 376 */
377 check_for_initrd(); 377 check_for_initrd();
378 378
379 /* 379 /*
380 * Do some platform-specific early initialization, which includes 380 * Do some platform-specific early initialization, which includes
381 * setting up the hash table pointers. It also sets up some interrupt-mapping 381 * setting up the hash table pointers. It also sets up some interrupt-mapping
382 * related options that will be used by finish_device_tree() 382 * related options that will be used by finish_device_tree()
383 */ 383 */
384 ppc_md.init_early(); 384 ppc_md.init_early();
385 385
386 /* 386 /*
387 * We can discover serial ports now since the above set up the 387 * We can discover serial ports now since the above set up the
388 * hash table management for us, so ioremap works. We do this early 388 * hash table management for us, so ioremap works. We do this early
389 * so that further code can be debugged 389 * so that further code can be debugged
390 */ 390 */
391 find_legacy_serial_ports(); 391 find_legacy_serial_ports();
392 392
393 /* 393 /*
394 * Register early console 394 * Register early console
395 */ 395 */
396 register_early_udbg_console(); 396 register_early_udbg_console();
397 397
398 /* 398 /*
399 * Initialize xmon 399 * Initialize xmon
400 */ 400 */
401 xmon_setup(); 401 xmon_setup();
402 402
403 check_smt_enabled(); 403 check_smt_enabled();
404 smp_setup_cpu_maps(); 404 smp_setup_cpu_maps();
405 405
406 #ifdef CONFIG_SMP 406 #ifdef CONFIG_SMP
407 /* Release secondary cpus out of their spinloops at 0x60 now that 407 /* Release secondary cpus out of their spinloops at 0x60 now that
408 * we can map physical -> logical CPU ids 408 * we can map physical -> logical CPU ids
409 */ 409 */
410 smp_release_cpus(); 410 smp_release_cpus();
411 #endif 411 #endif
412 412
413 printk("Starting Linux PPC64 %s\n", init_utsname()->version); 413 printk("Starting Linux PPC64 %s\n", init_utsname()->version);
414 414
415 printk("-----------------------------------------------------\n"); 415 printk("-----------------------------------------------------\n");
416 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 416 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
417 printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); 417 printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
418 printk("ppc64_caches.dcache_line_size = 0x%x\n", 418 printk("ppc64_caches.dcache_line_size = 0x%x\n",
419 ppc64_caches.dline_size); 419 ppc64_caches.dline_size);
420 printk("ppc64_caches.icache_line_size = 0x%x\n", 420 printk("ppc64_caches.icache_line_size = 0x%x\n",
421 ppc64_caches.iline_size); 421 ppc64_caches.iline_size);
422 printk("htab_address = 0x%p\n", htab_address); 422 printk("htab_address = 0x%p\n", htab_address);
423 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); 423 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
424 #if PHYSICAL_START > 0 424 #if PHYSICAL_START > 0
425 printk("physical_start = 0x%x\n", PHYSICAL_START); 425 printk("physical_start = 0x%x\n", PHYSICAL_START);
426 #endif 426 #endif
427 printk("-----------------------------------------------------\n"); 427 printk("-----------------------------------------------------\n");
428 428
429 DBG(" <- setup_system()\n"); 429 DBG(" <- setup_system()\n");
430 } 430 }
431 431
432 #ifdef CONFIG_IRQSTACKS 432 #ifdef CONFIG_IRQSTACKS
433 static void __init irqstack_early_init(void) 433 static void __init irqstack_early_init(void)
434 { 434 {
435 unsigned int i; 435 unsigned int i;
436 436
437 /* 437 /*
438 * Interrupt stacks must be under 256MB; we cannot afford to take 438 * Interrupt stacks must be under 256MB; we cannot afford to take
439 * SLB misses on them. 439 * SLB misses on them.
440 */ 440 */
441 for_each_possible_cpu(i) { 441 for_each_possible_cpu(i) {
442 softirq_ctx[i] = (struct thread_info *) 442 softirq_ctx[i] = (struct thread_info *)
443 __va(lmb_alloc_base(THREAD_SIZE, 443 __va(lmb_alloc_base(THREAD_SIZE,
444 THREAD_SIZE, 0x10000000)); 444 THREAD_SIZE, 0x10000000));
445 hardirq_ctx[i] = (struct thread_info *) 445 hardirq_ctx[i] = (struct thread_info *)
446 __va(lmb_alloc_base(THREAD_SIZE, 446 __va(lmb_alloc_base(THREAD_SIZE,
447 THREAD_SIZE, 0x10000000)); 447 THREAD_SIZE, 0x10000000));
448 } 448 }
449 } 449 }
450 #else 450 #else
451 #define irqstack_early_init() 451 #define irqstack_early_init()
452 #endif 452 #endif
453 453
454 /* 454 /*
455 * Stack space used when we detect a bad kernel stack pointer, and 455 * Stack space used when we detect a bad kernel stack pointer, and
456 * early in SMP boots before relocation is enabled. 456 * early in SMP boots before relocation is enabled.
457 */ 457 */
458 static void __init emergency_stack_init(void) 458 static void __init emergency_stack_init(void)
459 { 459 {
460 unsigned long limit; 460 unsigned long limit;
461 unsigned int i; 461 unsigned int i;
462 462
463 /* 463 /*
464 * Emergency stacks must be under 256MB; we cannot afford to take 464 * Emergency stacks must be under 256MB; we cannot afford to take
465 * SLB misses on them. The ABI also requires them to be 128-byte 465 * SLB misses on them. The ABI also requires them to be 128-byte
466 * aligned. 466 * aligned.
467 * 467 *
468 * Since we use these as temporary stacks during secondary CPU 468 * Since we use these as temporary stacks during secondary CPU
469 * bringup, we need to get at them in real mode. This means they 469 * bringup, we need to get at them in real mode. This means they
470 * must also be within the RMO region. 470 * must also be within the RMO region.
471 */ 471 */
472 limit = min(0x10000000UL, lmb.rmo_size); 472 limit = min(0x10000000UL, lmb.rmo_size);
473 473
474 for_each_possible_cpu(i) 474 for_each_possible_cpu(i)
475 paca[i].emergency_sp = 475 paca[i].emergency_sp =
476 __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE; 476 __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
477 } 477 }
478 478
479 /* 479 /*
480 * Called from start_kernel, after lock_kernel has been called. 480 * Called from start_kernel, after lock_kernel has been called.
481 * Initializes bootmem, which is used to manage page allocation until 481 * Initializes bootmem, which is used to manage page allocation until
482 * mem_init is called. 482 * mem_init is called.
483 */ 483 */
484 void __init setup_arch(char **cmdline_p) 484 void __init setup_arch(char **cmdline_p)
485 { 485 {
486 ppc64_boot_msg(0x12, "Setup Arch"); 486 ppc64_boot_msg(0x12, "Setup Arch");
487 487
488 *cmdline_p = cmd_line; 488 *cmdline_p = cmd_line;
489 489
490 /* 490 /*
491 * Set cache line size based on type of cpu as a default. 491 * Set cache line size based on type of cpu as a default.
492 * Systems with OF can look in the properties on the cpu node(s) 492 * Systems with OF can look in the properties on the cpu node(s)
493 * for a possibly more accurate value. 493 * for a possibly more accurate value.
494 */ 494 */
495 dcache_bsize = ppc64_caches.dline_size; 495 dcache_bsize = ppc64_caches.dline_size;
496 icache_bsize = ppc64_caches.iline_size; 496 icache_bsize = ppc64_caches.iline_size;
497 497
498 /* reboot on panic */ 498 /* reboot on panic */
499 panic_timeout = 180; 499 panic_timeout = 180;
500 500
501 if (ppc_md.panic) 501 if (ppc_md.panic)
502 setup_panic(); 502 setup_panic();
503 503
504 init_mm.start_code = PAGE_OFFSET; 504 init_mm.start_code = PAGE_OFFSET;
505 init_mm.end_code = (unsigned long) _etext; 505 init_mm.end_code = (unsigned long) _etext;
506 init_mm.end_data = (unsigned long) _edata; 506 init_mm.end_data = (unsigned long) _edata;
507 init_mm.brk = klimit; 507 init_mm.brk = klimit;
508 508
509 irqstack_early_init(); 509 irqstack_early_init();
510 emergency_stack_init(); 510 emergency_stack_init();
511 511
512 stabs_alloc(); 512 stabs_alloc();
513 513
514 /* set up the bootmem stuff with available memory */ 514 /* set up the bootmem stuff with available memory */
515 do_init_bootmem(); 515 do_init_bootmem();
516 sparse_init(); 516 sparse_init();
517 517
518 #ifdef CONFIG_DUMMY_CONSOLE 518 #ifdef CONFIG_DUMMY_CONSOLE
519 conswitchp = &dummy_con; 519 conswitchp = &dummy_con;
520 #endif 520 #endif
521 521
522 ppc_md.setup_arch(); 522 ppc_md.setup_arch();
523 523
524 paging_init(); 524 paging_init();
525 ppc64_boot_msg(0x15, "Setup Done"); 525 ppc64_boot_msg(0x15, "Setup Done");
526 } 526 }
527 527
528 528
529 /* ToDo: do something useful if ppc_md is not yet setup. */ 529 /* ToDo: do something useful if ppc_md is not yet setup. */
530 #define PPC64_LINUX_FUNCTION 0x0f000000 530 #define PPC64_LINUX_FUNCTION 0x0f000000
531 #define PPC64_IPL_MESSAGE 0xc0000000 531 #define PPC64_IPL_MESSAGE 0xc0000000
532 #define PPC64_TERM_MESSAGE 0xb0000000 532 #define PPC64_TERM_MESSAGE 0xb0000000
533 533
534 static void ppc64_do_msg(unsigned int src, const char *msg) 534 static void ppc64_do_msg(unsigned int src, const char *msg)
535 { 535 {
536 if (ppc_md.progress) { 536 if (ppc_md.progress) {
537 char buf[128]; 537 char buf[128];
538 538
539 sprintf(buf, "%08X\n", src); 539 sprintf(buf, "%08X\n", src);
540 ppc_md.progress(buf, 0); 540 ppc_md.progress(buf, 0);
541 snprintf(buf, 128, "%s", msg); 541 snprintf(buf, 128, "%s", msg);
542 ppc_md.progress(buf, 0); 542 ppc_md.progress(buf, 0);
543 } 543 }
544 } 544 }
545 545
546 /* Print a boot progress message. */ 546 /* Print a boot progress message. */
547 void ppc64_boot_msg(unsigned int src, const char *msg) 547 void ppc64_boot_msg(unsigned int src, const char *msg)
548 { 548 {
549 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); 549 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
550 printk("[boot]%04x %s\n", src, msg); 550 printk("[boot]%04x %s\n", src, msg);
551 } 551 }
552 552
553 /* Print a termination message (print only -- does not stop the kernel) */ 553 /* Print a termination message (print only -- does not stop the kernel) */
554 void ppc64_terminate_msg(unsigned int src, const char *msg) 554 void ppc64_terminate_msg(unsigned int src, const char *msg)
555 { 555 {
556 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg); 556 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
557 printk("[terminate]%04x %s\n", src, msg); 557 printk("[terminate]%04x %s\n", src, msg);
558 } 558 }
559 559
560 void cpu_die(void) 560 void cpu_die(void)
561 { 561 {
562 if (ppc_md.cpu_die) 562 if (ppc_md.cpu_die)
563 ppc_md.cpu_die(); 563 ppc_md.cpu_die();
564 } 564 }
565 565
566 #ifdef CONFIG_SMP 566 #ifdef CONFIG_SMP
567 void __init setup_per_cpu_areas(void) 567 void __init setup_per_cpu_areas(void)
568 { 568 {
569 int i; 569 int i;
570 unsigned long size; 570 unsigned long size;
571 char *ptr; 571 char *ptr;
572 572
573 /* Copy section for each CPU (we discard the original) */ 573 /* Copy section for each CPU (we discard the original) */
574 size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); 574 size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
575 #ifdef CONFIG_MODULES 575 #ifdef CONFIG_MODULES
576 if (size < PERCPU_ENOUGH_ROOM) 576 if (size < PERCPU_ENOUGH_ROOM)
577 size = PERCPU_ENOUGH_ROOM; 577 size = PERCPU_ENOUGH_ROOM;
578 #endif 578 #endif
579 579
580 for_each_possible_cpu(i) { 580 for_each_possible_cpu(i) {
581 ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size); 581 ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
582 if (!ptr) 582 if (!ptr)
583 panic("Cannot allocate cpu data for CPU %d\n", i); 583 panic("Cannot allocate cpu data for CPU %d\n", i);
584 584
585 paca[i].data_offset = ptr - __per_cpu_start; 585 paca[i].data_offset = ptr - __per_cpu_start;
586 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 586 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
587 } 587 }
588 } 588 }
589 #endif 589 #endif
590 590
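setup_per_cpu_areas() above copies the per-cpu data section once for each possible CPU and records each copy's displacement in paca[i].data_offset. The stand-alone C model below (all names are invented for illustration; it is not part of this patch) shows the arithmetic being set up: an accessor can later reach CPU i's copy of a variable simply by adding that recorded offset to the variable's address in the original section.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* stand-in for the linker-provided per-cpu template section */
static char percpu_template[64];
static const long counter_off = 16;	/* offset of one "per-cpu" variable */

static long data_offset[NR_CPUS];	/* models paca[i].data_offset */

int main(void)
{
	char *copies = malloc(NR_CPUS * sizeof(percpu_template));

	for (int i = 0; i < NR_CPUS; i++) {
		char *ptr = copies + i * sizeof(percpu_template);

		/* copy the template and remember its displacement,
		 * as setup_per_cpu_areas() does via the paca */
		memcpy(ptr, percpu_template, sizeof(percpu_template));
		data_offset[i] = ptr - percpu_template;
	}

	/* an accessor for CPU 2 just adds that CPU's recorded offset */
	long *counter = (long *)(percpu_template + counter_off + data_offset[2]);
	*counter = 42;
	printf("cpu 2 counter = %ld\n", *counter);

	free(copies);
	return 0;
}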
arch/powerpc/platforms/iseries/ksyms.c
1 /* 1 /*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp 2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 #include <linux/module.h> 9 #include <linux/module.h>
10 10
11 #include <asm/hw_irq.h> 11 #include <asm/hw_irq.h>
12 #include <asm/iseries/hv_call_sc.h> 12 #include <asm/iseries/hv_call_sc.h>
13 13
14 EXPORT_SYMBOL(HvCall0); 14 EXPORT_SYMBOL(HvCall0);
15 EXPORT_SYMBOL(HvCall1); 15 EXPORT_SYMBOL(HvCall1);
16 EXPORT_SYMBOL(HvCall2); 16 EXPORT_SYMBOL(HvCall2);
17 EXPORT_SYMBOL(HvCall3); 17 EXPORT_SYMBOL(HvCall3);
18 EXPORT_SYMBOL(HvCall4); 18 EXPORT_SYMBOL(HvCall4);
19 EXPORT_SYMBOL(HvCall5); 19 EXPORT_SYMBOL(HvCall5);
20 EXPORT_SYMBOL(HvCall6); 20 EXPORT_SYMBOL(HvCall6);
21 EXPORT_SYMBOL(HvCall7); 21 EXPORT_SYMBOL(HvCall7);
22
23 #ifdef CONFIG_SMP
24 EXPORT_SYMBOL(local_get_flags);
25 EXPORT_SYMBOL(local_irq_disable);
26 EXPORT_SYMBOL(local_irq_restore);
27 #endif
28 22
arch/powerpc/platforms/iseries/misc.S
1 /* 1 /*
2 * This file contains miscellaneous low-level functions. 2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-2005 IBM Corp 3 * Copyright (C) 1995-2005 IBM Corp
4 * 4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) 5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras. 6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) 7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 */ 14 */
15 15
16 #include <asm/processor.h> 16 #include <asm/processor.h>
17 #include <asm/asm-offsets.h> 17 #include <asm/asm-offsets.h>
18 #include <asm/ppc_asm.h> 18 #include <asm/ppc_asm.h>
19 19
20 .text 20 .text
21 21
22 /* unsigned long local_save_flags(void) */ 22 /* Handle pending interrupts in interrupt context */
23 _GLOBAL(local_get_flags) 23 _GLOBAL(iseries_handle_interrupts)
24 lbz r3,PACAPROCENABLED(r13)
25 blr
26
27 /* unsigned long local_irq_disable(void) */
28 _GLOBAL(local_irq_disable)
29 lbz r3,PACAPROCENABLED(r13)
30 li r4,0
31 stb r4,PACAPROCENABLED(r13)
32 blr /* Done */
33
34 /* void local_irq_restore(unsigned long flags) */
35 _GLOBAL(local_irq_restore)
36 lbz r5,PACAPROCENABLED(r13)
37 /* Check if things are setup the way we want _already_. */
38 cmpw 0,r3,r5
39 beqlr
40 /* are we enabling interrupts? */
41 cmpdi 0,r3,0
42 stb r3,PACAPROCENABLED(r13)
43 beqlr
44 /* Check pending interrupts */
45 /* A decrementer, IPI or PMC interrupt may have occurred
46 * while we were in the hypervisor (which enables) */
47 ld r4,PACALPPACAPTR(r13)
48 ld r4,LPPACAANYINT(r4)
49 cmpdi r4,0
50 beqlr
51
52 /*
53 * Handle pending interrupts in interrupt context
54 */
55 li r0,0x5555 24 li r0,0x5555
56 sc 25 sc
57 blr 26 blr
58 27
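The assembly removed above was the iSeries-only soft-disable implementation; all that survives in this file is the 0x5555 hypervisor call, now named iseries_handle_interrupts(). The deleted tail of local_irq_restore (load PACALPPACAPTR, test LPPACAANYINT, then the sc) corresponds roughly to the C below. This is only a sketch of the removed logic: the lppaca field name is assumed, and the real C replacement lives outside the hunks shown here.

/* C rendering of the deleted assembly path: when re-enabling, check the
 * lppaca for a decrementer/IPI/PMC interrupt that arrived while we were
 * disabled and, if one is pending, make the 0x5555 hypervisor call to
 * process it.  The field name "any_int" is an assumption for this sketch. */
static inline void check_pending_iseries_irqs(unsigned long new_flags)
{
	if (!new_flags)					/* still disabled */
		return;
	if (get_paca()->lppaca_ptr->any_int)		/* LPPACAANYINT */
		iseries_handle_interrupts();		/* li r0,0x5555; sc */
}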
include/asm-powerpc/hw_irq.h
1 /* 1 /*
2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> 2 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
3 */ 3 */
4 #ifndef _ASM_POWERPC_HW_IRQ_H 4 #ifndef _ASM_POWERPC_HW_IRQ_H
5 #define _ASM_POWERPC_HW_IRQ_H 5 #define _ASM_POWERPC_HW_IRQ_H
6 6
7 #ifdef __KERNEL__ 7 #ifdef __KERNEL__
8 8
9 #include <linux/errno.h> 9 #include <linux/errno.h>
10 #include <linux/compiler.h>
10 #include <asm/ptrace.h> 11 #include <asm/ptrace.h>
11 #include <asm/processor.h> 12 #include <asm/processor.h>
12 13
13 extern void timer_interrupt(struct pt_regs *); 14 extern void timer_interrupt(struct pt_regs *);
14 15
15 #ifdef CONFIG_PPC_ISERIES 16 #ifdef CONFIG_PPC64
17 #include <asm/paca.h>
16 18
17 extern unsigned long local_get_flags(void); 19 static inline unsigned long local_get_flags(void)
18 extern unsigned long local_irq_disable(void); 20 {
21 return get_paca()->soft_enabled;
22 }
23
24 static inline unsigned long local_irq_disable(void)
25 {
26 unsigned long flag = get_paca()->soft_enabled;
27 get_paca()->soft_enabled = 0;
28 barrier();
29 return flag;
30 }
31
19 extern void local_irq_restore(unsigned long); 32 extern void local_irq_restore(unsigned long);
33 extern void iseries_handle_interrupts(void);
20 34
21 #define local_irq_enable() local_irq_restore(1) 35 #define local_irq_enable() local_irq_restore(1)
22 #define local_save_flags(flags) ((flags) = local_get_flags()) 36 #define local_save_flags(flags) ((flags) = local_get_flags())
23 #define local_irq_save(flags) ((flags) = local_irq_disable()) 37 #define local_irq_save(flags) ((flags) = local_irq_disable())
24 38
25 #define irqs_disabled() (local_get_flags() == 0) 39 #define irqs_disabled() (local_get_flags() == 0)
26 40
41 #define hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
42 #define hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
43
27 #else 44 #else
28 45
29 #if defined(CONFIG_BOOKE) 46 #if defined(CONFIG_BOOKE)
30 #define SET_MSR_EE(x) mtmsr(x) 47 #define SET_MSR_EE(x) mtmsr(x)
31 #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") 48 #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
32 #elif defined(__powerpc64__)
33 #define SET_MSR_EE(x) __mtmsrd(x, 1)
34 #define local_irq_restore(flags) do { \
35 __asm__ __volatile__("": : :"memory"); \
36 __mtmsrd((flags), 1); \
37 } while(0)
38 #else 49 #else
39 #define SET_MSR_EE(x) mtmsr(x) 50 #define SET_MSR_EE(x) mtmsr(x)
40 #define local_irq_restore(flags) mtmsr(flags) 51 #define local_irq_restore(flags) mtmsr(flags)
41 #endif 52 #endif
42 53
43 static inline void local_irq_disable(void) 54 static inline void local_irq_disable(void)
44 { 55 {
45 #ifdef CONFIG_BOOKE 56 #ifdef CONFIG_BOOKE
46 __asm__ __volatile__("wrteei 0": : :"memory"); 57 __asm__ __volatile__("wrteei 0": : :"memory");
47 #else 58 #else
48 unsigned long msr; 59 unsigned long msr;
49 __asm__ __volatile__("": : :"memory"); 60 __asm__ __volatile__("": : :"memory");
50 msr = mfmsr(); 61 msr = mfmsr();
51 SET_MSR_EE(msr & ~MSR_EE); 62 SET_MSR_EE(msr & ~MSR_EE);
52 #endif 63 #endif
53 } 64 }
54 65
55 static inline void local_irq_enable(void) 66 static inline void local_irq_enable(void)
56 { 67 {
57 #ifdef CONFIG_BOOKE 68 #ifdef CONFIG_BOOKE
58 __asm__ __volatile__("wrteei 1": : :"memory"); 69 __asm__ __volatile__("wrteei 1": : :"memory");
59 #else 70 #else
60 unsigned long msr; 71 unsigned long msr;
61 __asm__ __volatile__("": : :"memory"); 72 __asm__ __volatile__("": : :"memory");
62 msr = mfmsr(); 73 msr = mfmsr();
63 SET_MSR_EE(msr | MSR_EE); 74 SET_MSR_EE(msr | MSR_EE);
64 #endif 75 #endif
65 } 76 }
66 77
67 static inline void local_irq_save_ptr(unsigned long *flags) 78 static inline void local_irq_save_ptr(unsigned long *flags)
68 { 79 {
69 unsigned long msr; 80 unsigned long msr;
70 msr = mfmsr(); 81 msr = mfmsr();
71 *flags = msr; 82 *flags = msr;
72 #ifdef CONFIG_BOOKE 83 #ifdef CONFIG_BOOKE
73 __asm__ __volatile__("wrteei 0": : :"memory"); 84 __asm__ __volatile__("wrteei 0": : :"memory");
74 #else 85 #else
75 SET_MSR_EE(msr & ~MSR_EE); 86 SET_MSR_EE(msr & ~MSR_EE);
76 #endif 87 #endif
77 __asm__ __volatile__("": : :"memory"); 88 __asm__ __volatile__("": : :"memory");
78 } 89 }
79 90
80 #define local_save_flags(flags) ((flags) = mfmsr()) 91 #define local_save_flags(flags) ((flags) = mfmsr())
81 #define local_irq_save(flags) local_irq_save_ptr(&flags) 92 #define local_irq_save(flags) local_irq_save_ptr(&flags)
82 #define irqs_disabled() ((mfmsr() & MSR_EE) == 0) 93 #define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
83 94
84 #endif /* CONFIG_PPC_ISERIES */ 95 #endif /* CONFIG_PPC64 */
85 96
86 #define mask_irq(irq) \ 97 #define mask_irq(irq) \
87 ({ \ 98 ({ \
88 irq_desc_t *desc = get_irq_desc(irq); \ 99 irq_desc_t *desc = get_irq_desc(irq); \
89 if (desc->chip && desc->chip->disable) \ 100 if (desc->chip && desc->chip->disable) \
90 desc->chip->disable(irq); \ 101 desc->chip->disable(irq); \
91 }) 102 })
92 #define unmask_irq(irq) \ 103 #define unmask_irq(irq) \
93 ({ \ 104 ({ \
94 irq_desc_t *desc = get_irq_desc(irq); \ 105 irq_desc_t *desc = get_irq_desc(irq); \
95 if (desc->chip && desc->chip->enable) \ 106 if (desc->chip && desc->chip->enable) \
96 desc->chip->enable(irq); \ 107 desc->chip->enable(irq); \
97 }) 108 })
98 #define ack_irq(irq) \ 109 #define ack_irq(irq) \
99 ({ \ 110 ({ \
100 irq_desc_t *desc = get_irq_desc(irq); \ 111 irq_desc_t *desc = get_irq_desc(irq); \
101 if (desc->chip && desc->chip->ack) \ 112 if (desc->chip && desc->chip->ack) \
102 desc->chip->ack(irq); \ 113 desc->chip->ack(irq); \
103 }) 114 })
104 115
105 /* 116 /*
106 * interrupt-retrigger: should we handle this via lost interrupts and IPIs 117 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
107 * or should we not care like we do now ? --BenH. 118 * or should we not care like we do now ? --BenH.
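The callers' view is unchanged by the block above: local_irq_save()/local_irq_restore() and irqs_disabled() keep their usual meaning, except that on 64-bit they now read and write the paca's soft_enabled byte rather than the MSR. A hypothetical user looks like this; code that genuinely needs MSR[EE] cleared can use the hard_irq_disable() macro defined above, which writes the MSR directly.

/* Hypothetical caller of the interface above; struct foo is made up
 * purely for the example. */
struct foo {
	unsigned long count;
};

static void foo_bump(struct foo *f)
{
	unsigned long flags;

	local_irq_save(flags);		/* 64-bit: save and clear soft_enabled */
	f->count++;			/* no interrupt handler runs in here */
	local_irq_restore(flags);	/* put back whatever state we saved */
}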
include/asm-powerpc/paca.h
1 /* 1 /*
2 * include/asm-powerpc/paca.h 2 * include/asm-powerpc/paca.h
3 * 3 *
4 * This control block defines the PACA which defines the processor 4 * This control block defines the PACA which defines the processor
5 * specific data for each logical processor on the system. 5 * specific data for each logical processor on the system.
6 * There are some pointers defined that are utilized by PLIC. 6 * There are some pointers defined that are utilized by PLIC.
7 * 7 *
8 * C 2001 PPC 64 Team, IBM Corp 8 * C 2001 PPC 64 Team, IBM Corp
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version. 13 * 2 of the License, or (at your option) any later version.
14 */ 14 */
15 #ifndef _ASM_POWERPC_PACA_H 15 #ifndef _ASM_POWERPC_PACA_H
16 #define _ASM_POWERPC_PACA_H 16 #define _ASM_POWERPC_PACA_H
17 #ifdef __KERNEL__ 17 #ifdef __KERNEL__
18 18
19 #include <asm/types.h> 19 #include <asm/types.h>
20 #include <asm/lppaca.h> 20 #include <asm/lppaca.h>
21 #include <asm/mmu.h> 21 #include <asm/mmu.h>
22 22
23 register struct paca_struct *local_paca asm("r13"); 23 register struct paca_struct *local_paca asm("r13");
24 #define get_paca() local_paca 24 #define get_paca() local_paca
25 #define get_lppaca() (get_paca()->lppaca_ptr) 25 #define get_lppaca() (get_paca()->lppaca_ptr)
26 #define get_slb_shadow() (get_paca()->slb_shadow_ptr) 26 #define get_slb_shadow() (get_paca()->slb_shadow_ptr)
27 27
28 struct task_struct; 28 struct task_struct;
29 29
30 /* 30 /*
31 * Defines the layout of the paca. 31 * Defines the layout of the paca.
32 * 32 *
33 * This structure is not directly accessed by firmware or the service 33 * This structure is not directly accessed by firmware or the service
34 * processor except for the first two pointers that point to the 34 * processor except for the first two pointers that point to the
35 * lppaca area and the ItLpRegSave area for this CPU. The lppaca 35 * lppaca area and the ItLpRegSave area for this CPU. The lppaca
36 * object is currently contained within the PACA but it doesn't need 36 * object is currently contained within the PACA but it doesn't need
37 * to be. 37 * to be.
38 */ 38 */
39 struct paca_struct { 39 struct paca_struct {
40 /* 40 /*
41 * Because hw_cpu_id, unlike other paca fields, is accessed 41 * Because hw_cpu_id, unlike other paca fields, is accessed
42 * routinely from other CPUs (from the IRQ code), we stick to 42 * routinely from other CPUs (from the IRQ code), we stick to
43 * read-only (after boot) fields in the first cacheline to 43 * read-only (after boot) fields in the first cacheline to
44 * avoid cacheline bouncing. 44 * avoid cacheline bouncing.
45 */ 45 */
46 46
47 /* 47 /*
48 * MAGIC: These first two pointers can't be moved - they're 48 * MAGIC: These first two pointers can't be moved - they're
49 * accessed by the firmware 49 * accessed by the firmware
50 */ 50 */
51 struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */ 51 struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
52 #ifdef CONFIG_PPC_ISERIES 52 #ifdef CONFIG_PPC_ISERIES
53 void *reg_save_ptr; /* Pointer to LpRegSave for PLIC */ 53 void *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
54 #endif /* CONFIG_PPC_ISERIES */ 54 #endif /* CONFIG_PPC_ISERIES */
55 55
56 /* 56 /*
57 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c 57 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
58 * load lock_token and paca_index with a single lwz 58 * load lock_token and paca_index with a single lwz
59 * instruction. They must travel together and be properly 59 * instruction. They must travel together and be properly
60 * aligned. 60 * aligned.
61 */ 61 */
62 u16 lock_token; /* Constant 0x8000, used in locks */ 62 u16 lock_token; /* Constant 0x8000, used in locks */
63 u16 paca_index; /* Logical processor number */ 63 u16 paca_index; /* Logical processor number */
64 64
65 u64 kernel_toc; /* Kernel TOC address */ 65 u64 kernel_toc; /* Kernel TOC address */
66 u64 stab_real; /* Absolute address of segment table */ 66 u64 stab_real; /* Absolute address of segment table */
67 u64 stab_addr; /* Virtual address of segment table */ 67 u64 stab_addr; /* Virtual address of segment table */
68 void *emergency_sp; /* pointer to emergency stack */ 68 void *emergency_sp; /* pointer to emergency stack */
69 u64 data_offset; /* per cpu data offset */ 69 u64 data_offset; /* per cpu data offset */
70 s16 hw_cpu_id; /* Physical processor number */ 70 s16 hw_cpu_id; /* Physical processor number */
71 u8 cpu_start; /* At startup, processor spins until */ 71 u8 cpu_start; /* At startup, processor spins until */
72 /* this becomes non-zero. */ 72 /* this becomes non-zero. */
73 73
74 /* 74 /*
75 * Now, starting in cacheline 2, the exception save areas 75 * Now, starting in cacheline 2, the exception save areas
76 */ 76 */
77 /* used for most interrupts/exceptions */ 77 /* used for most interrupts/exceptions */
78 u64 exgen[10] __attribute__((aligned(0x80))); 78 u64 exgen[10] __attribute__((aligned(0x80)));
79 u64 exmc[10]; /* used for machine checks */ 79 u64 exmc[10]; /* used for machine checks */
80 u64 exslb[10]; /* used for SLB/segment table misses 80 u64 exslb[10]; /* used for SLB/segment table misses
81 * on the linear mapping */ 81 * on the linear mapping */
82 82
83 mm_context_t context; 83 mm_context_t context;
84 u16 vmalloc_sllp; 84 u16 vmalloc_sllp;
85 u16 slb_cache[SLB_CACHE_ENTRIES]; 85 u16 slb_cache[SLB_CACHE_ENTRIES];
86 u16 slb_cache_ptr; 86 u16 slb_cache_ptr;
87 87
88 /* 88 /*
89 * then miscellaneous read-write fields 89 * then miscellaneous read-write fields
90 */ 90 */
91 struct task_struct *__current; /* Pointer to current */ 91 struct task_struct *__current; /* Pointer to current */
92 u64 kstack; /* Saved Kernel stack addr */ 92 u64 kstack; /* Saved Kernel stack addr */
93 u64 stab_rr; /* stab/slb round-robin counter */ 93 u64 stab_rr; /* stab/slb round-robin counter */
94 u64 saved_r1; /* r1 save for RTAS calls */ 94 u64 saved_r1; /* r1 save for RTAS calls */
95 u64 saved_msr; /* MSR saved here by enter_rtas */ 95 u64 saved_msr; /* MSR saved here by enter_rtas */
96 u8 proc_enabled; /* irq soft-enable flag */ 96 u8 soft_enabled; /* irq soft-enable flag */
97 u8 hard_enabled; /* set if irqs are enabled in MSR */
97 u8 io_sync; /* writel() needs spin_unlock sync */ 98 u8 io_sync; /* writel() needs spin_unlock sync */
98 99
99 /* Stuff for accurate time accounting */ 100 /* Stuff for accurate time accounting */
100 u64 user_time; /* accumulated usermode TB ticks */ 101 u64 user_time; /* accumulated usermode TB ticks */
101 u64 system_time; /* accumulated system TB ticks */ 102 u64 system_time; /* accumulated system TB ticks */
102 u64 startpurr; /* PURR/TB value snapshot */ 103 u64 startpurr; /* PURR/TB value snapshot */
103 104
104 struct slb_shadow *slb_shadow_ptr; 105 struct slb_shadow *slb_shadow_ptr;
105 }; 106 };
106 107
107 extern struct paca_struct paca[]; 108 extern struct paca_struct paca[];
108 109
109 void setup_boot_paca(void); 110 void setup_boot_paca(void);
110 111
111 #endif /* __KERNEL__ */ 112 #endif /* __KERNEL__ */
112 #endif /* _ASM_POWERPC_PACA_H */ 113 #endif /* _ASM_POWERPC_PACA_H */
113 114
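Taken together, the two bytes added to the paca above describe the interrupt state from two angles: soft_enabled is what the local_irq_* routines in hw_irq.h read and write, while hard_enabled, per its comment, tracks whether interrupts are actually enabled in the MSR. The restore path that reconciles the two is not among the hunks shown here, so the following plain-C model (invented names, user-space, compile-and-run) is only a sketch of how the pair can be expected to interact.

#include <stdio.h>

static int soft_enabled = 1;	/* models paca->soft_enabled */
static int hard_enabled = 1;	/* models paca->hard_enabled (MSR[EE]) */

static unsigned long model_irq_disable(void)
{
	unsigned long old = soft_enabled;
	soft_enabled = 0;		/* flag only; the MSR is left alone */
	return old;
}

/* what an interrupt arriving inside a soft-disabled region is assumed
 * to do: note the fact and leave the processor hard-disabled */
static void model_irq_arrives(void)
{
	if (!soft_enabled) {
		hard_enabled = 0;
		printf("interrupt deferred\n");
		return;
	}
	printf("interrupt handled immediately\n");
}

static void model_irq_restore(unsigned long flags)
{
	soft_enabled = flags;
	if (flags && !hard_enabled) {
		hard_enabled = 1;	/* only now re-enable in the MSR */
		printf("hard-enabling on restore\n");
	}
}

int main(void)
{
	unsigned long flags = model_irq_disable();
	model_irq_arrives();		/* lands in the soft-disabled region */
	model_irq_restore(flags);	/* re-enables both flags */
	return 0;
}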