Commit af61bdf035e2e4dd646b37b270bd558188a127c0
1 parent: fe0d42203c
Exists in master and in 6 other branches

ARM: vfp: rename last_VFP_context to vfp_current_hw_state

Rename the slightly confusing 'last_VFP_context' variable to be more
descriptive of what it actually is. This variable stores a pointer to
the current owner's vfpstate structure for the context held in the VFP
hardware.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Showing 2 changed files with 26 additions and 20 deletions (inline diff)
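For orientation before the diff, here is a minimal C sketch of the data structure being renamed. The per-CPU array maps each CPU to the vfpstate structure whose contents are currently loaded into that CPU's VFP hardware, or NULL when the hardware context is invalid. The helper below is hypothetical (it is not part of this commit) and only illustrates the ownership check that recurs throughout both files; the headers are those already pulled in by arch/arm/vfp/vfpmodule.c.

    #include <linux/threads.h>       /* NR_CPUS */
    #include <asm/thread_info.h>     /* struct thread_info, ->vfpstate */
    #include <asm/fpstate.h>         /* union vfp_state */

    /* Renamed from last_VFP_context: which thread's state the hardware holds. */
    union vfp_state *vfp_current_hw_state[NR_CPUS];

    /* Hypothetical helper, for illustration only: does this CPU's VFP
       hardware currently hold 'thread's context? */
    static inline int vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
    {
            return vfp_current_hw_state[cpu] == &thread->vfpstate;
    }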
arch/arm/vfp/vfphw.S
1 | /* | 1 | /* |
2 | * linux/arch/arm/vfp/vfphw.S | 2 | * linux/arch/arm/vfp/vfphw.S |
3 | * | 3 | * |
4 | * Copyright (C) 2004 ARM Limited. | 4 | * Copyright (C) 2004 ARM Limited. |
5 | * Written by Deep Blue Solutions Limited. | 5 | * Written by Deep Blue Solutions Limited. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This code is called from the kernel's undefined instruction trap. | 11 | * This code is called from the kernel's undefined instruction trap. |
12 | * r9 holds the return address for successful handling. | 12 | * r9 holds the return address for successful handling. |
13 | * lr holds the return address for unrecognised instructions. | 13 | * lr holds the return address for unrecognised instructions. |
14 | * r10 points at the start of the private FP workspace in the thread structure | 14 | * r10 points at the start of the private FP workspace in the thread structure |
15 | * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h) | 15 | * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h) |
16 | */ | 16 | */ |
17 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
18 | #include <asm/vfpmacros.h> | 18 | #include <asm/vfpmacros.h> |
19 | #include "../kernel/entry-header.S" | 19 | #include "../kernel/entry-header.S" |
20 | 20 | ||
21 | .macro DBGSTR, str | 21 | .macro DBGSTR, str |
22 | #ifdef DEBUG | 22 | #ifdef DEBUG |
23 | stmfd sp!, {r0-r3, ip, lr} | 23 | stmfd sp!, {r0-r3, ip, lr} |
24 | add r0, pc, #4 | 24 | add r0, pc, #4 |
25 | bl printk | 25 | bl printk |
26 | b 1f | 26 | b 1f |
27 | .asciz "<7>VFP: \str\n" | 27 | .asciz "<7>VFP: \str\n" |
28 | .balign 4 | 28 | .balign 4 |
29 | 1: ldmfd sp!, {r0-r3, ip, lr} | 29 | 1: ldmfd sp!, {r0-r3, ip, lr} |
30 | #endif | 30 | #endif |
31 | .endm | 31 | .endm |
32 | 32 | ||
33 | .macro DBGSTR1, str, arg | 33 | .macro DBGSTR1, str, arg |
34 | #ifdef DEBUG | 34 | #ifdef DEBUG |
35 | stmfd sp!, {r0-r3, ip, lr} | 35 | stmfd sp!, {r0-r3, ip, lr} |
36 | mov r1, \arg | 36 | mov r1, \arg |
37 | add r0, pc, #4 | 37 | add r0, pc, #4 |
38 | bl printk | 38 | bl printk |
39 | b 1f | 39 | b 1f |
40 | .asciz "<7>VFP: \str\n" | 40 | .asciz "<7>VFP: \str\n" |
41 | .balign 4 | 41 | .balign 4 |
42 | 1: ldmfd sp!, {r0-r3, ip, lr} | 42 | 1: ldmfd sp!, {r0-r3, ip, lr} |
43 | #endif | 43 | #endif |
44 | .endm | 44 | .endm |
45 | 45 | ||
46 | .macro DBGSTR3, str, arg1, arg2, arg3 | 46 | .macro DBGSTR3, str, arg1, arg2, arg3 |
47 | #ifdef DEBUG | 47 | #ifdef DEBUG |
48 | stmfd sp!, {r0-r3, ip, lr} | 48 | stmfd sp!, {r0-r3, ip, lr} |
49 | mov r3, \arg3 | 49 | mov r3, \arg3 |
50 | mov r2, \arg2 | 50 | mov r2, \arg2 |
51 | mov r1, \arg1 | 51 | mov r1, \arg1 |
52 | add r0, pc, #4 | 52 | add r0, pc, #4 |
53 | bl printk | 53 | bl printk |
54 | b 1f | 54 | b 1f |
55 | .asciz "<7>VFP: \str\n" | 55 | .asciz "<7>VFP: \str\n" |
56 | .balign 4 | 56 | .balign 4 |
57 | 1: ldmfd sp!, {r0-r3, ip, lr} | 57 | 1: ldmfd sp!, {r0-r3, ip, lr} |
58 | #endif | 58 | #endif |
59 | .endm | 59 | .endm |
60 | 60 | ||
61 | 61 | ||
62 | @ VFP hardware support entry point. | 62 | @ VFP hardware support entry point. |
63 | @ | 63 | @ |
64 | @ r0 = faulted instruction | 64 | @ r0 = faulted instruction |
65 | @ r2 = faulted PC+4 | 65 | @ r2 = faulted PC+4 |
66 | @ r9 = successful return | 66 | @ r9 = successful return |
67 | @ r10 = vfp_state union | 67 | @ r10 = vfp_state union |
68 | @ r11 = CPU number | 68 | @ r11 = CPU number |
69 | @ lr = failure return | 69 | @ lr = failure return |
70 | 70 | ||
71 | ENTRY(vfp_support_entry) | 71 | ENTRY(vfp_support_entry) |
72 | DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 | 72 | DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 |
73 | 73 | ||
74 | VFPFMRX r1, FPEXC @ Is the VFP enabled? | 74 | VFPFMRX r1, FPEXC @ Is the VFP enabled? |
75 | DBGSTR1 "fpexc %08x", r1 | 75 | DBGSTR1 "fpexc %08x", r1 |
76 | tst r1, #FPEXC_EN | 76 | tst r1, #FPEXC_EN |
77 | bne look_for_VFP_exceptions @ VFP is already enabled | 77 | bne look_for_VFP_exceptions @ VFP is already enabled |
78 | 78 | ||
79 | DBGSTR1 "enable %x", r10 | 79 | DBGSTR1 "enable %x", r10 |
80 | ldr r3, last_VFP_context_address | 80 | ldr r3, vfp_current_hw_state_address |
81 | orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set | 81 | orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set |
82 | ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer | 82 | ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer |
83 | bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled | 83 | bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled |
84 | cmp r4, r10 | 84 | cmp r4, r10 |
85 | beq check_for_exception @ we are returning to the same | 85 | beq check_for_exception @ we are returning to the same |
86 | @ process, so the registers are | 86 | @ process, so the registers are |
87 | @ still there. In this case, we do | 87 | @ still there. In this case, we do |
88 | @ not want to drop a pending exception. | 88 | @ not want to drop a pending exception. |
89 | 89 | ||
90 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending | 90 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending |
91 | @ exceptions, so we can get at the | 91 | @ exceptions, so we can get at the |
92 | @ rest of it | 92 | @ rest of it |
93 | 93 | ||
94 | #ifndef CONFIG_SMP | 94 | #ifndef CONFIG_SMP |
95 | @ Save out the current registers to the old thread state | 95 | @ Save out the current registers to the old thread state |
96 | @ No need for SMP since this is not done lazily | 96 | @ No need for SMP since this is not done lazily |
97 | 97 | ||
98 | DBGSTR1 "save old state %p", r4 | 98 | DBGSTR1 "save old state %p", r4 |
99 | cmp r4, #0 | 99 | cmp r4, #0 |
100 | beq no_old_VFP_process | 100 | beq no_old_VFP_process |
101 | VFPFSTMIA r4, r5 @ save the working registers | 101 | VFPFSTMIA r4, r5 @ save the working registers |
102 | VFPFMRX r5, FPSCR @ current status | 102 | VFPFMRX r5, FPSCR @ current status |
103 | #ifndef CONFIG_CPU_FEROCEON | 103 | #ifndef CONFIG_CPU_FEROCEON |
104 | tst r1, #FPEXC_EX @ is there additional state to save? | 104 | tst r1, #FPEXC_EX @ is there additional state to save? |
105 | beq 1f | 105 | beq 1f |
106 | VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set) | 106 | VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set) |
107 | tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? | 107 | tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? |
108 | beq 1f | 108 | beq 1f |
109 | VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present) | 109 | VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present) |
110 | 1: | 110 | 1: |
111 | #endif | 111 | #endif |
112 | stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 | 112 | stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 |
113 | @ and point r4 at the word at the | 113 | @ and point r4 at the word at the |
114 | @ start of the register dump | 114 | @ start of the register dump |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | no_old_VFP_process: | 117 | no_old_VFP_process: |
118 | DBGSTR1 "load state %p", r10 | 118 | DBGSTR1 "load state %p", r10 |
119 | str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer | 119 | str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer |
120 | @ Load the saved state back into the VFP | 120 | @ Load the saved state back into the VFP |
121 | VFPFLDMIA r10, r5 @ reload the working registers while | 121 | VFPFLDMIA r10, r5 @ reload the working registers while |
122 | @ FPEXC is in a safe state | 122 | @ FPEXC is in a safe state |
123 | ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2 | 123 | ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2 |
124 | #ifndef CONFIG_CPU_FEROCEON | 124 | #ifndef CONFIG_CPU_FEROCEON |
125 | tst r1, #FPEXC_EX @ is there additional state to restore? | 125 | tst r1, #FPEXC_EX @ is there additional state to restore? |
126 | beq 1f | 126 | beq 1f |
127 | VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set) | 127 | VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set) |
128 | tst r1, #FPEXC_FP2V @ is there an FPINST2 to write? | 128 | tst r1, #FPEXC_FP2V @ is there an FPINST2 to write? |
129 | beq 1f | 129 | beq 1f |
130 | VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present) | 130 | VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present) |
131 | 1: | 131 | 1: |
132 | #endif | 132 | #endif |
133 | VFPFMXR FPSCR, r5 @ restore status | 133 | VFPFMXR FPSCR, r5 @ restore status |
134 | 134 | ||
135 | check_for_exception: | 135 | check_for_exception: |
136 | tst r1, #FPEXC_EX | 136 | tst r1, #FPEXC_EX |
137 | bne process_exception @ might as well handle the pending | 137 | bne process_exception @ might as well handle the pending |
138 | @ exception before retrying branch | 138 | @ exception before retrying branch |
139 | @ out before setting an FPEXC that | 139 | @ out before setting an FPEXC that |
140 | @ stops us reading stuff | 140 | @ stops us reading stuff |
141 | VFPFMXR FPEXC, r1 @ restore FPEXC last | 141 | VFPFMXR FPEXC, r1 @ restore FPEXC last |
142 | sub r2, r2, #4 | 142 | sub r2, r2, #4 |
143 | str r2, [sp, #S_PC] @ retry the instruction | 143 | str r2, [sp, #S_PC] @ retry the instruction |
144 | #ifdef CONFIG_PREEMPT | 144 | #ifdef CONFIG_PREEMPT |
145 | get_thread_info r10 | 145 | get_thread_info r10 |
146 | ldr r4, [r10, #TI_PREEMPT] @ get preempt count | 146 | ldr r4, [r10, #TI_PREEMPT] @ get preempt count |
147 | sub r11, r4, #1 @ decrement it | 147 | sub r11, r4, #1 @ decrement it |
148 | str r11, [r10, #TI_PREEMPT] | 148 | str r11, [r10, #TI_PREEMPT] |
149 | #endif | 149 | #endif |
150 | mov pc, r9 @ we think we have handled things | 150 | mov pc, r9 @ we think we have handled things |
151 | 151 | ||
152 | 152 | ||
153 | look_for_VFP_exceptions: | 153 | look_for_VFP_exceptions: |
154 | @ Check for synchronous or asynchronous exception | 154 | @ Check for synchronous or asynchronous exception |
155 | tst r1, #FPEXC_EX | FPEXC_DEX | 155 | tst r1, #FPEXC_EX | FPEXC_DEX |
156 | bne process_exception | 156 | bne process_exception |
157 | @ On some implementations of the VFP subarch 1, setting FPSCR.IXE | 157 | @ On some implementations of the VFP subarch 1, setting FPSCR.IXE |
158 | @ causes all the CDP instructions to be bounced synchronously without | 158 | @ causes all the CDP instructions to be bounced synchronously without |
159 | @ setting the FPEXC.EX bit | 159 | @ setting the FPEXC.EX bit |
160 | VFPFMRX r5, FPSCR | 160 | VFPFMRX r5, FPSCR |
161 | tst r5, #FPSCR_IXE | 161 | tst r5, #FPSCR_IXE |
162 | bne process_exception | 162 | bne process_exception |
163 | 163 | ||
164 | @ Fall into hand on to next handler - appropriate coproc instr | 164 | @ Fall into hand on to next handler - appropriate coproc instr |
165 | @ not recognised by VFP | 165 | @ not recognised by VFP |
166 | 166 | ||
167 | DBGSTR "not VFP" | 167 | DBGSTR "not VFP" |
168 | #ifdef CONFIG_PREEMPT | 168 | #ifdef CONFIG_PREEMPT |
169 | get_thread_info r10 | 169 | get_thread_info r10 |
170 | ldr r4, [r10, #TI_PREEMPT] @ get preempt count | 170 | ldr r4, [r10, #TI_PREEMPT] @ get preempt count |
171 | sub r11, r4, #1 @ decrement it | 171 | sub r11, r4, #1 @ decrement it |
172 | str r11, [r10, #TI_PREEMPT] | 172 | str r11, [r10, #TI_PREEMPT] |
173 | #endif | 173 | #endif |
174 | mov pc, lr | 174 | mov pc, lr |
175 | 175 | ||
176 | process_exception: | 176 | process_exception: |
177 | DBGSTR "bounce" | 177 | DBGSTR "bounce" |
178 | mov r2, sp @ nothing stacked - regdump is at TOS | 178 | mov r2, sp @ nothing stacked - regdump is at TOS |
179 | mov lr, r9 @ setup for a return to the user code. | 179 | mov lr, r9 @ setup for a return to the user code. |
180 | 180 | ||
181 | @ Now call the C code to package up the bounce to the support code | 181 | @ Now call the C code to package up the bounce to the support code |
182 | @ r0 holds the trigger instruction | 182 | @ r0 holds the trigger instruction |
183 | @ r1 holds the FPEXC value | 183 | @ r1 holds the FPEXC value |
184 | @ r2 pointer to register dump | 184 | @ r2 pointer to register dump |
185 | b VFP_bounce @ we have handled this - the support | 185 | b VFP_bounce @ we have handled this - the support |
186 | @ code will raise an exception if | 186 | @ code will raise an exception if |
187 | @ required. If not, the user code will | 187 | @ required. If not, the user code will |
188 | @ retry the faulted instruction | 188 | @ retry the faulted instruction |
189 | ENDPROC(vfp_support_entry) | 189 | ENDPROC(vfp_support_entry) |
190 | 190 | ||
191 | ENTRY(vfp_save_state) | 191 | ENTRY(vfp_save_state) |
192 | @ Save the current VFP state | 192 | @ Save the current VFP state |
193 | @ r0 - save location | 193 | @ r0 - save location |
194 | @ r1 - FPEXC | 194 | @ r1 - FPEXC |
195 | DBGSTR1 "save VFP state %p", r0 | 195 | DBGSTR1 "save VFP state %p", r0 |
196 | VFPFSTMIA r0, r2 @ save the working registers | 196 | VFPFSTMIA r0, r2 @ save the working registers |
197 | VFPFMRX r2, FPSCR @ current status | 197 | VFPFMRX r2, FPSCR @ current status |
198 | tst r1, #FPEXC_EX @ is there additional state to save? | 198 | tst r1, #FPEXC_EX @ is there additional state to save? |
199 | beq 1f | 199 | beq 1f |
200 | VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set) | 200 | VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set) |
201 | tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? | 201 | tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? |
202 | beq 1f | 202 | beq 1f |
203 | VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) | 203 | VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) |
204 | 1: | 204 | 1: |
205 | stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 | 205 | stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 |
206 | mov pc, lr | 206 | mov pc, lr |
207 | ENDPROC(vfp_save_state) | 207 | ENDPROC(vfp_save_state) |
208 | 208 | ||
209 | .align | 209 | .align |
210 | last_VFP_context_address: | 210 | vfp_current_hw_state_address: |
211 | .word last_VFP_context | 211 | .word vfp_current_hw_state |
212 | 212 | ||
213 | .macro tbl_branch, base, tmp, shift | 213 | .macro tbl_branch, base, tmp, shift |
214 | #ifdef CONFIG_THUMB2_KERNEL | 214 | #ifdef CONFIG_THUMB2_KERNEL |
215 | adr \tmp, 1f | 215 | adr \tmp, 1f |
216 | add \tmp, \tmp, \base, lsl \shift | 216 | add \tmp, \tmp, \base, lsl \shift |
217 | mov pc, \tmp | 217 | mov pc, \tmp |
218 | #else | 218 | #else |
219 | add pc, pc, \base, lsl \shift | 219 | add pc, pc, \base, lsl \shift |
220 | mov r0, r0 | 220 | mov r0, r0 |
221 | #endif | 221 | #endif |
222 | 1: | 222 | 1: |
223 | .endm | 223 | .endm |
224 | 224 | ||
225 | ENTRY(vfp_get_float) | 225 | ENTRY(vfp_get_float) |
226 | tbl_branch r0, r3, #3 | 226 | tbl_branch r0, r3, #3 |
227 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | 227 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
228 | 1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 | 228 | 1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 |
229 | mov pc, lr | 229 | mov pc, lr |
230 | .org 1b + 8 | 230 | .org 1b + 8 |
231 | 1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 | 231 | 1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 |
232 | mov pc, lr | 232 | mov pc, lr |
233 | .org 1b + 8 | 233 | .org 1b + 8 |
234 | .endr | 234 | .endr |
235 | ENDPROC(vfp_get_float) | 235 | ENDPROC(vfp_get_float) |
236 | 236 | ||
237 | ENTRY(vfp_put_float) | 237 | ENTRY(vfp_put_float) |
238 | tbl_branch r1, r3, #3 | 238 | tbl_branch r1, r3, #3 |
239 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | 239 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
240 | 1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 | 240 | 1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 |
241 | mov pc, lr | 241 | mov pc, lr |
242 | .org 1b + 8 | 242 | .org 1b + 8 |
243 | 1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 | 243 | 1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 |
244 | mov pc, lr | 244 | mov pc, lr |
245 | .org 1b + 8 | 245 | .org 1b + 8 |
246 | .endr | 246 | .endr |
247 | ENDPROC(vfp_put_float) | 247 | ENDPROC(vfp_put_float) |
248 | 248 | ||
249 | ENTRY(vfp_get_double) | 249 | ENTRY(vfp_get_double) |
250 | tbl_branch r0, r3, #3 | 250 | tbl_branch r0, r3, #3 |
251 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | 251 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
252 | 1: fmrrd r0, r1, d\dr | 252 | 1: fmrrd r0, r1, d\dr |
253 | mov pc, lr | 253 | mov pc, lr |
254 | .org 1b + 8 | 254 | .org 1b + 8 |
255 | .endr | 255 | .endr |
256 | #ifdef CONFIG_VFPv3 | 256 | #ifdef CONFIG_VFPv3 |
257 | @ d16 - d31 registers | 257 | @ d16 - d31 registers |
258 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | 258 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
259 | 1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr | 259 | 1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr |
260 | mov pc, lr | 260 | mov pc, lr |
261 | .org 1b + 8 | 261 | .org 1b + 8 |
262 | .endr | 262 | .endr |
263 | #endif | 263 | #endif |
264 | 264 | ||
265 | @ virtual register 16 (or 32 if VFPv3) for compare with zero | 265 | @ virtual register 16 (or 32 if VFPv3) for compare with zero |
266 | mov r0, #0 | 266 | mov r0, #0 |
267 | mov r1, #0 | 267 | mov r1, #0 |
268 | mov pc, lr | 268 | mov pc, lr |
269 | ENDPROC(vfp_get_double) | 269 | ENDPROC(vfp_get_double) |
270 | 270 | ||
271 | ENTRY(vfp_put_double) | 271 | ENTRY(vfp_put_double) |
272 | tbl_branch r2, r3, #3 | 272 | tbl_branch r2, r3, #3 |
273 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | 273 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
274 | 1: fmdrr d\dr, r0, r1 | 274 | 1: fmdrr d\dr, r0, r1 |
275 | mov pc, lr | 275 | mov pc, lr |
276 | .org 1b + 8 | 276 | .org 1b + 8 |
277 | .endr | 277 | .endr |
278 | #ifdef CONFIG_VFPv3 | 278 | #ifdef CONFIG_VFPv3 |
279 | @ d16 - d31 registers | 279 | @ d16 - d31 registers |
280 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | 280 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
281 | 1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr | 281 | 1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr |
282 | mov pc, lr | 282 | mov pc, lr |
283 | .org 1b + 8 | 283 | .org 1b + 8 |
284 | .endr | 284 | .endr |
285 | #endif | 285 | #endif |
286 | ENDPROC(vfp_put_double) | 286 | ENDPROC(vfp_put_double) |
287 | 287 |
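Before moving on to the C file, the following is a rough C-level paraphrase of the lazy-switch logic that vfp_support_entry above implements around the renamed pointer. It is a sketch derived from the assembly comments, not code from the commit: the save of the previous owner's registers is shown for the !CONFIG_SMP case only, matching the #ifndef in the assembly (on SMP the save happens at thread switch instead), and it reuses helpers already used in vfpmodule.c (vfp_save_state, fmrx, FPEXC).

    /* Hypothetical paraphrase of vfp_support_entry: r10 = vfp, r11 = cpu. */
    static void vfp_support_entry_sketch(unsigned int cpu, union vfp_state *vfp)
    {
            if (vfp_current_hw_state[cpu] == vfp)
                    return;         /* same owner: the registers are still in the
                                       hardware, so keep any pending exception */

    #ifndef CONFIG_SMP
            /* Lazy save of the previous owner's registers (UP only). */
            if (vfp_current_hw_state[cpu])
                    vfp_save_state(vfp_current_hw_state[cpu], fmrx(FPEXC));
    #endif

            /* The hardware now holds this thread's context. */
            vfp_current_hw_state[cpu] = vfp;

            /* ...reload the working registers, FPSCR and, if FPEXC.EX was set,
               FPINST/FPINST2 from *vfp, then restore FPEXC last... */
    }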
arch/arm/vfp/vfpmodule.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/vfp/vfpmodule.c | 2 | * linux/arch/arm/vfp/vfpmodule.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 ARM Limited. | 4 | * Copyright (C) 2004 ARM Limited. |
5 | * Written by Deep Blue Solutions Limited. | 5 | * Written by Deep Blue Solutions Limited. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/notifier.h> | 15 | #include <linux/notifier.h> |
16 | #include <linux/signal.h> | 16 | #include <linux/signal.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | 20 | ||
21 | #include <asm/cputype.h> | 21 | #include <asm/cputype.h> |
22 | #include <asm/thread_notify.h> | 22 | #include <asm/thread_notify.h> |
23 | #include <asm/vfp.h> | 23 | #include <asm/vfp.h> |
24 | 24 | ||
25 | #include "vfpinstr.h" | 25 | #include "vfpinstr.h" |
26 | #include "vfp.h" | 26 | #include "vfp.h" |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Our undef handlers (in entry.S) | 29 | * Our undef handlers (in entry.S) |
30 | */ | 30 | */ |
31 | void vfp_testing_entry(void); | 31 | void vfp_testing_entry(void); |
32 | void vfp_support_entry(void); | 32 | void vfp_support_entry(void); |
33 | void vfp_null_entry(void); | 33 | void vfp_null_entry(void); |
34 | 34 | ||
35 | void (*vfp_vector)(void) = vfp_null_entry; | 35 | void (*vfp_vector)(void) = vfp_null_entry; |
36 | union vfp_state *last_VFP_context[NR_CPUS]; | ||
37 | 36 | ||
38 | /* | 37 | /* |
38 | * The pointer to the vfpstate structure of the thread which currently | ||
39 | * owns the context held in the VFP hardware, or NULL if the hardware | ||
40 | * context is invalid. | ||
41 | */ | ||
42 | union vfp_state *vfp_current_hw_state[NR_CPUS]; | ||
43 | |||
44 | /* | ||
39 | * Dual-use variable. | 45 | * Dual-use variable. |
40 | * Used in startup: set to non-zero if VFP checks fail | 46 | * Used in startup: set to non-zero if VFP checks fail |
41 | * After startup, holds VFP architecture | 47 | * After startup, holds VFP architecture |
42 | */ | 48 | */ |
43 | unsigned int VFP_arch; | 49 | unsigned int VFP_arch; |
44 | 50 | ||
45 | /* | 51 | /* |
46 | * Per-thread VFP initialization. | 52 | * Per-thread VFP initialization. |
47 | */ | 53 | */ |
48 | static void vfp_thread_flush(struct thread_info *thread) | 54 | static void vfp_thread_flush(struct thread_info *thread) |
49 | { | 55 | { |
50 | union vfp_state *vfp = &thread->vfpstate; | 56 | union vfp_state *vfp = &thread->vfpstate; |
51 | unsigned int cpu; | 57 | unsigned int cpu; |
52 | 58 | ||
53 | memset(vfp, 0, sizeof(union vfp_state)); | 59 | memset(vfp, 0, sizeof(union vfp_state)); |
54 | 60 | ||
55 | vfp->hard.fpexc = FPEXC_EN; | 61 | vfp->hard.fpexc = FPEXC_EN; |
56 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; | 62 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; |
57 | 63 | ||
58 | /* | 64 | /* |
59 | * Disable VFP to ensure we initialize it first. We must ensure | 65 | * Disable VFP to ensure we initialize it first. We must ensure |
60 | * that the modification of last_VFP_context[] and hardware disable | 66 | * that the modification of vfp_current_hw_state[] and hardware disable |
61 | * are done for the same CPU and without preemption. | 67 | * are done for the same CPU and without preemption. |
62 | */ | 68 | */ |
63 | cpu = get_cpu(); | 69 | cpu = get_cpu(); |
64 | if (last_VFP_context[cpu] == vfp) | 70 | if (vfp_current_hw_state[cpu] == vfp) |
65 | last_VFP_context[cpu] = NULL; | 71 | vfp_current_hw_state[cpu] = NULL; |
66 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 72 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
67 | put_cpu(); | 73 | put_cpu(); |
68 | } | 74 | } |
69 | 75 | ||
70 | static void vfp_thread_exit(struct thread_info *thread) | 76 | static void vfp_thread_exit(struct thread_info *thread) |
71 | { | 77 | { |
72 | /* release case: Per-thread VFP cleanup. */ | 78 | /* release case: Per-thread VFP cleanup. */ |
73 | union vfp_state *vfp = &thread->vfpstate; | 79 | union vfp_state *vfp = &thread->vfpstate; |
74 | unsigned int cpu = get_cpu(); | 80 | unsigned int cpu = get_cpu(); |
75 | 81 | ||
76 | if (last_VFP_context[cpu] == vfp) | 82 | if (vfp_current_hw_state[cpu] == vfp) |
77 | last_VFP_context[cpu] = NULL; | 83 | vfp_current_hw_state[cpu] = NULL; |
78 | put_cpu(); | 84 | put_cpu(); |
79 | } | 85 | } |
80 | 86 | ||
81 | static void vfp_thread_copy(struct thread_info *thread) | 87 | static void vfp_thread_copy(struct thread_info *thread) |
82 | { | 88 | { |
83 | struct thread_info *parent = current_thread_info(); | 89 | struct thread_info *parent = current_thread_info(); |
84 | 90 | ||
85 | vfp_sync_hwstate(parent); | 91 | vfp_sync_hwstate(parent); |
86 | thread->vfpstate = parent->vfpstate; | 92 | thread->vfpstate = parent->vfpstate; |
87 | } | 93 | } |
88 | 94 | ||
89 | /* | 95 | /* |
90 | * When this function is called with the following 'cmd's, the following | 96 | * When this function is called with the following 'cmd's, the following |
91 | * is true while this function is being run: | 97 | * is true while this function is being run: |
92 | * THREAD_NOFTIFY_SWTICH: | 98 | * THREAD_NOFTIFY_SWTICH: |
93 | * - the previously running thread will not be scheduled onto another CPU. | 99 | * - the previously running thread will not be scheduled onto another CPU. |
94 | * - the next thread to be run (v) will not be running on another CPU. | 100 | * - the next thread to be run (v) will not be running on another CPU. |
95 | * - thread->cpu is the local CPU number | 101 | * - thread->cpu is the local CPU number |
96 | * - not preemptible as we're called in the middle of a thread switch | 102 | * - not preemptible as we're called in the middle of a thread switch |
97 | * THREAD_NOTIFY_FLUSH: | 103 | * THREAD_NOTIFY_FLUSH: |
98 | * - the thread (v) will be running on the local CPU, so | 104 | * - the thread (v) will be running on the local CPU, so |
99 | * v === current_thread_info() | 105 | * v === current_thread_info() |
100 | * - thread->cpu is the local CPU number at the time it is accessed, | 106 | * - thread->cpu is the local CPU number at the time it is accessed, |
101 | * but may change at any time. | 107 | * but may change at any time. |
102 | * - we could be preempted if tree preempt rcu is enabled, so | 108 | * - we could be preempted if tree preempt rcu is enabled, so |
103 | * it is unsafe to use thread->cpu. | 109 | * it is unsafe to use thread->cpu. |
104 | * THREAD_NOTIFY_EXIT | 110 | * THREAD_NOTIFY_EXIT |
105 | * - the thread (v) will be running on the local CPU, so | 111 | * - the thread (v) will be running on the local CPU, so |
106 | * v === current_thread_info() | 112 | * v === current_thread_info() |
107 | * - thread->cpu is the local CPU number at the time it is accessed, | 113 | * - thread->cpu is the local CPU number at the time it is accessed, |
108 | * but may change at any time. | 114 | * but may change at any time. |
109 | * - we could be preempted if tree preempt rcu is enabled, so | 115 | * - we could be preempted if tree preempt rcu is enabled, so |
110 | * it is unsafe to use thread->cpu. | 116 | * it is unsafe to use thread->cpu. |
111 | */ | 117 | */ |
112 | static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | 118 | static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) |
113 | { | 119 | { |
114 | struct thread_info *thread = v; | 120 | struct thread_info *thread = v; |
115 | u32 fpexc; | 121 | u32 fpexc; |
116 | #ifdef CONFIG_SMP | 122 | #ifdef CONFIG_SMP |
117 | unsigned int cpu; | 123 | unsigned int cpu; |
118 | #endif | 124 | #endif |
119 | 125 | ||
120 | switch (cmd) { | 126 | switch (cmd) { |
121 | case THREAD_NOTIFY_SWITCH: | 127 | case THREAD_NOTIFY_SWITCH: |
122 | fpexc = fmrx(FPEXC); | 128 | fpexc = fmrx(FPEXC); |
123 | 129 | ||
124 | #ifdef CONFIG_SMP | 130 | #ifdef CONFIG_SMP |
125 | cpu = thread->cpu; | 131 | cpu = thread->cpu; |
126 | 132 | ||
127 | /* | 133 | /* |
128 | * On SMP, if VFP is enabled, save the old state in | 134 | * On SMP, if VFP is enabled, save the old state in |
129 | * case the thread migrates to a different CPU. The | 135 | * case the thread migrates to a different CPU. The |
130 | * restoring is done lazily. | 136 | * restoring is done lazily. |
131 | */ | 137 | */ |
132 | if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { | 138 | if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) { |
133 | vfp_save_state(last_VFP_context[cpu], fpexc); | 139 | vfp_save_state(vfp_current_hw_state[cpu], fpexc); |
134 | last_VFP_context[cpu]->hard.cpu = cpu; | 140 | vfp_current_hw_state[cpu]->hard.cpu = cpu; |
135 | } | 141 | } |
136 | /* | 142 | /* |
137 | * Thread migration, just force the reloading of the | 143 | * Thread migration, just force the reloading of the |
138 | * state on the new CPU in case the VFP registers | 144 | * state on the new CPU in case the VFP registers |
139 | * contain stale data. | 145 | * contain stale data. |
140 | */ | 146 | */ |
141 | if (thread->vfpstate.hard.cpu != cpu) | 147 | if (thread->vfpstate.hard.cpu != cpu) |
142 | last_VFP_context[cpu] = NULL; | 148 | vfp_current_hw_state[cpu] = NULL; |
143 | #endif | 149 | #endif |
144 | 150 | ||
145 | /* | 151 | /* |
146 | * Always disable VFP so we can lazily save/restore the | 152 | * Always disable VFP so we can lazily save/restore the |
147 | * old state. | 153 | * old state. |
148 | */ | 154 | */ |
149 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | 155 | fmxr(FPEXC, fpexc & ~FPEXC_EN); |
150 | break; | 156 | break; |
151 | 157 | ||
152 | case THREAD_NOTIFY_FLUSH: | 158 | case THREAD_NOTIFY_FLUSH: |
153 | vfp_thread_flush(thread); | 159 | vfp_thread_flush(thread); |
154 | break; | 160 | break; |
155 | 161 | ||
156 | case THREAD_NOTIFY_EXIT: | 162 | case THREAD_NOTIFY_EXIT: |
157 | vfp_thread_exit(thread); | 163 | vfp_thread_exit(thread); |
158 | break; | 164 | break; |
159 | 165 | ||
160 | case THREAD_NOTIFY_COPY: | 166 | case THREAD_NOTIFY_COPY: |
161 | vfp_thread_copy(thread); | 167 | vfp_thread_copy(thread); |
162 | break; | 168 | break; |
163 | } | 169 | } |
164 | 170 | ||
165 | return NOTIFY_DONE; | 171 | return NOTIFY_DONE; |
166 | } | 172 | } |
167 | 173 | ||
168 | static struct notifier_block vfp_notifier_block = { | 174 | static struct notifier_block vfp_notifier_block = { |
169 | .notifier_call = vfp_notifier, | 175 | .notifier_call = vfp_notifier, |
170 | }; | 176 | }; |
171 | 177 | ||
172 | /* | 178 | /* |
173 | * Raise a SIGFPE for the current process. | 179 | * Raise a SIGFPE for the current process. |
174 | * sicode describes the signal being raised. | 180 | * sicode describes the signal being raised. |
175 | */ | 181 | */ |
176 | static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) | 182 | static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) |
177 | { | 183 | { |
178 | siginfo_t info; | 184 | siginfo_t info; |
179 | 185 | ||
180 | memset(&info, 0, sizeof(info)); | 186 | memset(&info, 0, sizeof(info)); |
181 | 187 | ||
182 | info.si_signo = SIGFPE; | 188 | info.si_signo = SIGFPE; |
183 | info.si_code = sicode; | 189 | info.si_code = sicode; |
184 | info.si_addr = (void __user *)(instruction_pointer(regs) - 4); | 190 | info.si_addr = (void __user *)(instruction_pointer(regs) - 4); |
185 | 191 | ||
186 | /* | 192 | /* |
187 | * This is the same as NWFPE, because it's not clear what | 193 | * This is the same as NWFPE, because it's not clear what |
188 | * this is used for | 194 | * this is used for |
189 | */ | 195 | */ |
190 | current->thread.error_code = 0; | 196 | current->thread.error_code = 0; |
191 | current->thread.trap_no = 6; | 197 | current->thread.trap_no = 6; |
192 | 198 | ||
193 | send_sig_info(SIGFPE, &info, current); | 199 | send_sig_info(SIGFPE, &info, current); |
194 | } | 200 | } |
195 | 201 | ||
196 | static void vfp_panic(char *reason, u32 inst) | 202 | static void vfp_panic(char *reason, u32 inst) |
197 | { | 203 | { |
198 | int i; | 204 | int i; |
199 | 205 | ||
200 | printk(KERN_ERR "VFP: Error: %s\n", reason); | 206 | printk(KERN_ERR "VFP: Error: %s\n", reason); |
201 | printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", | 207 | printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", |
202 | fmrx(FPEXC), fmrx(FPSCR), inst); | 208 | fmrx(FPEXC), fmrx(FPSCR), inst); |
203 | for (i = 0; i < 32; i += 2) | 209 | for (i = 0; i < 32; i += 2) |
204 | printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n", | 210 | printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n", |
205 | i, vfp_get_float(i), i+1, vfp_get_float(i+1)); | 211 | i, vfp_get_float(i), i+1, vfp_get_float(i+1)); |
206 | } | 212 | } |
207 | 213 | ||
208 | /* | 214 | /* |
209 | * Process bitmask of exception conditions. | 215 | * Process bitmask of exception conditions. |
210 | */ | 216 | */ |
211 | static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs) | 217 | static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs) |
212 | { | 218 | { |
213 | int si_code = 0; | 219 | int si_code = 0; |
214 | 220 | ||
215 | pr_debug("VFP: raising exceptions %08x\n", exceptions); | 221 | pr_debug("VFP: raising exceptions %08x\n", exceptions); |
216 | 222 | ||
217 | if (exceptions == VFP_EXCEPTION_ERROR) { | 223 | if (exceptions == VFP_EXCEPTION_ERROR) { |
218 | vfp_panic("unhandled bounce", inst); | 224 | vfp_panic("unhandled bounce", inst); |
219 | vfp_raise_sigfpe(0, regs); | 225 | vfp_raise_sigfpe(0, regs); |
220 | return; | 226 | return; |
221 | } | 227 | } |
222 | 228 | ||
223 | /* | 229 | /* |
224 | * If any of the status flags are set, update the FPSCR. | 230 | * If any of the status flags are set, update the FPSCR. |
225 | * Comparison instructions always return at least one of | 231 | * Comparison instructions always return at least one of |
226 | * these flags set. | 232 | * these flags set. |
227 | */ | 233 | */ |
228 | if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) | 234 | if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) |
229 | fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V); | 235 | fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V); |
230 | 236 | ||
231 | fpscr |= exceptions; | 237 | fpscr |= exceptions; |
232 | 238 | ||
233 | fmxr(FPSCR, fpscr); | 239 | fmxr(FPSCR, fpscr); |
234 | 240 | ||
235 | #define RAISE(stat,en,sig) \ | 241 | #define RAISE(stat,en,sig) \ |
236 | if (exceptions & stat && fpscr & en) \ | 242 | if (exceptions & stat && fpscr & en) \ |
237 | si_code = sig; | 243 | si_code = sig; |
238 | 244 | ||
239 | /* | 245 | /* |
240 | * These are arranged in priority order, least to highest. | 246 | * These are arranged in priority order, least to highest. |
241 | */ | 247 | */ |
242 | RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV); | 248 | RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV); |
243 | RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES); | 249 | RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES); |
244 | RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND); | 250 | RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND); |
245 | RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); | 251 | RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); |
246 | RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV); | 252 | RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV); |
247 | 253 | ||
248 | if (si_code) | 254 | if (si_code) |
249 | vfp_raise_sigfpe(si_code, regs); | 255 | vfp_raise_sigfpe(si_code, regs); |
250 | } | 256 | } |
251 | 257 | ||
252 | /* | 258 | /* |
253 | * Emulate a VFP instruction. | 259 | * Emulate a VFP instruction. |
254 | */ | 260 | */ |
255 | static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) | 261 | static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) |
256 | { | 262 | { |
257 | u32 exceptions = VFP_EXCEPTION_ERROR; | 263 | u32 exceptions = VFP_EXCEPTION_ERROR; |
258 | 264 | ||
259 | pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); | 265 | pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); |
260 | 266 | ||
261 | if (INST_CPRTDO(inst)) { | 267 | if (INST_CPRTDO(inst)) { |
262 | if (!INST_CPRT(inst)) { | 268 | if (!INST_CPRT(inst)) { |
263 | /* | 269 | /* |
264 | * CPDO | 270 | * CPDO |
265 | */ | 271 | */ |
266 | if (vfp_single(inst)) { | 272 | if (vfp_single(inst)) { |
267 | exceptions = vfp_single_cpdo(inst, fpscr); | 273 | exceptions = vfp_single_cpdo(inst, fpscr); |
268 | } else { | 274 | } else { |
269 | exceptions = vfp_double_cpdo(inst, fpscr); | 275 | exceptions = vfp_double_cpdo(inst, fpscr); |
270 | } | 276 | } |
271 | } else { | 277 | } else { |
272 | /* | 278 | /* |
273 | * A CPRT instruction can not appear in FPINST2, nor | 279 | * A CPRT instruction can not appear in FPINST2, nor |
274 | * can it cause an exception. Therefore, we do not | 280 | * can it cause an exception. Therefore, we do not |
275 | * have to emulate it. | 281 | * have to emulate it. |
276 | */ | 282 | */ |
277 | } | 283 | } |
278 | } else { | 284 | } else { |
279 | /* | 285 | /* |
280 | * A CPDT instruction can not appear in FPINST2, nor can | 286 | * A CPDT instruction can not appear in FPINST2, nor can |
281 | * it cause an exception. Therefore, we do not have to | 287 | * it cause an exception. Therefore, we do not have to |
282 | * emulate it. | 288 | * emulate it. |
283 | */ | 289 | */ |
284 | } | 290 | } |
285 | return exceptions & ~VFP_NAN_FLAG; | 291 | return exceptions & ~VFP_NAN_FLAG; |
286 | } | 292 | } |
287 | 293 | ||
288 | /* | 294 | /* |
289 | * Package up a bounce condition. | 295 | * Package up a bounce condition. |
290 | */ | 296 | */ |
291 | void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) | 297 | void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) |
292 | { | 298 | { |
293 | u32 fpscr, orig_fpscr, fpsid, exceptions; | 299 | u32 fpscr, orig_fpscr, fpsid, exceptions; |
294 | 300 | ||
295 | pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); | 301 | pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); |
296 | 302 | ||
297 | /* | 303 | /* |
298 | * At this point, FPEXC can have the following configuration: | 304 | * At this point, FPEXC can have the following configuration: |
299 | * | 305 | * |
300 | * EX DEX IXE | 306 | * EX DEX IXE |
301 | * 0 1 x - synchronous exception | 307 | * 0 1 x - synchronous exception |
302 | * 1 x 0 - asynchronous exception | 308 | * 1 x 0 - asynchronous exception |
303 | * 1 x 1 - sychronous on VFP subarch 1 and asynchronous on later | 309 | * 1 x 1 - sychronous on VFP subarch 1 and asynchronous on later |
304 | * 0 0 1 - synchronous on VFP9 (non-standard subarch 1 | 310 | * 0 0 1 - synchronous on VFP9 (non-standard subarch 1 |
305 | * implementation), undefined otherwise | 311 | * implementation), undefined otherwise |
306 | * | 312 | * |
307 | * Clear various bits and enable access to the VFP so we can | 313 | * Clear various bits and enable access to the VFP so we can |
308 | * handle the bounce. | 314 | * handle the bounce. |
309 | */ | 315 | */ |
310 | fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)); | 316 | fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)); |
311 | 317 | ||
312 | fpsid = fmrx(FPSID); | 318 | fpsid = fmrx(FPSID); |
313 | orig_fpscr = fpscr = fmrx(FPSCR); | 319 | orig_fpscr = fpscr = fmrx(FPSCR); |
314 | 320 | ||
315 | /* | 321 | /* |
316 | * Check for the special VFP subarch 1 and FPSCR.IXE bit case | 322 | * Check for the special VFP subarch 1 and FPSCR.IXE bit case |
317 | */ | 323 | */ |
318 | if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT) | 324 | if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT) |
319 | && (fpscr & FPSCR_IXE)) { | 325 | && (fpscr & FPSCR_IXE)) { |
320 | /* | 326 | /* |
321 | * Synchronous exception, emulate the trigger instruction | 327 | * Synchronous exception, emulate the trigger instruction |
322 | */ | 328 | */ |
323 | goto emulate; | 329 | goto emulate; |
324 | } | 330 | } |
325 | 331 | ||
326 | if (fpexc & FPEXC_EX) { | 332 | if (fpexc & FPEXC_EX) { |
327 | #ifndef CONFIG_CPU_FEROCEON | 333 | #ifndef CONFIG_CPU_FEROCEON |
328 | /* | 334 | /* |
329 | * Asynchronous exception. The instruction is read from FPINST | 335 | * Asynchronous exception. The instruction is read from FPINST |
330 | * and the interrupted instruction has to be restarted. | 336 | * and the interrupted instruction has to be restarted. |
331 | */ | 337 | */ |
332 | trigger = fmrx(FPINST); | 338 | trigger = fmrx(FPINST); |
333 | regs->ARM_pc -= 4; | 339 | regs->ARM_pc -= 4; |
334 | #endif | 340 | #endif |
335 | } else if (!(fpexc & FPEXC_DEX)) { | 341 | } else if (!(fpexc & FPEXC_DEX)) { |
336 | /* | 342 | /* |
337 | * Illegal combination of bits. It can be caused by an | 343 | * Illegal combination of bits. It can be caused by an |
338 | * unallocated VFP instruction but with FPSCR.IXE set and not | 344 | * unallocated VFP instruction but with FPSCR.IXE set and not |
339 | * on VFP subarch 1. | 345 | * on VFP subarch 1. |
340 | */ | 346 | */ |
341 | vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); | 347 | vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); |
342 | goto exit; | 348 | goto exit; |
343 | } | 349 | } |
344 | 350 | ||
345 | /* | 351 | /* |
346 | * Modify fpscr to indicate the number of iterations remaining. | 352 | * Modify fpscr to indicate the number of iterations remaining. |
347 | * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates | 353 | * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates |
348 | * whether FPEXC.VECITR or FPSCR.LEN is used. | 354 | * whether FPEXC.VECITR or FPSCR.LEN is used. |
349 | */ | 355 | */ |
350 | if (fpexc & (FPEXC_EX | FPEXC_VV)) { | 356 | if (fpexc & (FPEXC_EX | FPEXC_VV)) { |
351 | u32 len; | 357 | u32 len; |
352 | 358 | ||
353 | len = fpexc + (1 << FPEXC_LENGTH_BIT); | 359 | len = fpexc + (1 << FPEXC_LENGTH_BIT); |
354 | 360 | ||
355 | fpscr &= ~FPSCR_LENGTH_MASK; | 361 | fpscr &= ~FPSCR_LENGTH_MASK; |
356 | fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT); | 362 | fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT); |
357 | } | 363 | } |
358 | 364 | ||
359 | /* | 365 | /* |
360 | * Handle the first FP instruction. We used to take note of the | 366 | * Handle the first FP instruction. We used to take note of the |
361 | * FPEXC bounce reason, but this appears to be unreliable. | 367 | * FPEXC bounce reason, but this appears to be unreliable. |
362 | * Emulate the bounced instruction instead. | 368 | * Emulate the bounced instruction instead. |
363 | */ | 369 | */ |
364 | exceptions = vfp_emulate_instruction(trigger, fpscr, regs); | 370 | exceptions = vfp_emulate_instruction(trigger, fpscr, regs); |
365 | if (exceptions) | 371 | if (exceptions) |
366 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); | 372 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); |
367 | 373 | ||
368 | /* | 374 | /* |
369 | * If there isn't a second FP instruction, exit now. Note that | 375 | * If there isn't a second FP instruction, exit now. Note that |
370 | * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. | 376 | * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. |
371 | */ | 377 | */ |
372 | if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) | 378 | if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) |
373 | goto exit; | 379 | goto exit; |
374 | 380 | ||
375 | /* | 381 | /* |
376 | * The barrier() here prevents fpinst2 being read | 382 | * The barrier() here prevents fpinst2 being read |
377 | * before the condition above. | 383 | * before the condition above. |
378 | */ | 384 | */ |
379 | barrier(); | 385 | barrier(); |
380 | trigger = fmrx(FPINST2); | 386 | trigger = fmrx(FPINST2); |
381 | 387 | ||
382 | emulate: | 388 | emulate: |
383 | exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); | 389 | exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); |
384 | if (exceptions) | 390 | if (exceptions) |
385 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); | 391 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); |
386 | exit: | 392 | exit: |
387 | preempt_enable(); | 393 | preempt_enable(); |
388 | } | 394 | } |
389 | 395 | ||
390 | static void vfp_enable(void *unused) | 396 | static void vfp_enable(void *unused) |
391 | { | 397 | { |
392 | u32 access = get_copro_access(); | 398 | u32 access = get_copro_access(); |
393 | 399 | ||
394 | /* | 400 | /* |
395 | * Enable full access to VFP (cp10 and cp11) | 401 | * Enable full access to VFP (cp10 and cp11) |
396 | */ | 402 | */ |
397 | set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); | 403 | set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); |
398 | } | 404 | } |
399 | 405 | ||
400 | #ifdef CONFIG_PM | 406 | #ifdef CONFIG_PM |
401 | #include <linux/syscore_ops.h> | 407 | #include <linux/syscore_ops.h> |
402 | 408 | ||
403 | static int vfp_pm_suspend(void) | 409 | static int vfp_pm_suspend(void) |
404 | { | 410 | { |
405 | struct thread_info *ti = current_thread_info(); | 411 | struct thread_info *ti = current_thread_info(); |
406 | u32 fpexc = fmrx(FPEXC); | 412 | u32 fpexc = fmrx(FPEXC); |
407 | 413 | ||
408 | /* if vfp is on, then save state for resumption */ | 414 | /* if vfp is on, then save state for resumption */ |
409 | if (fpexc & FPEXC_EN) { | 415 | if (fpexc & FPEXC_EN) { |
410 | printk(KERN_DEBUG "%s: saving vfp state\n", __func__); | 416 | printk(KERN_DEBUG "%s: saving vfp state\n", __func__); |
411 | vfp_save_state(&ti->vfpstate, fpexc); | 417 | vfp_save_state(&ti->vfpstate, fpexc); |
412 | 418 | ||
413 | /* disable, just in case */ | 419 | /* disable, just in case */ |
414 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 420 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
415 | } | 421 | } |
416 | 422 | ||
417 | /* clear any information we had about last context state */ | 423 | /* clear any information we had about last context state */ |
418 | memset(last_VFP_context, 0, sizeof(last_VFP_context)); | 424 | memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); |
419 | 425 | ||
420 | return 0; | 426 | return 0; |
421 | } | 427 | } |
422 | 428 | ||
423 | static void vfp_pm_resume(void) | 429 | static void vfp_pm_resume(void) |
424 | { | 430 | { |
425 | /* ensure we have access to the vfp */ | 431 | /* ensure we have access to the vfp */ |
426 | vfp_enable(NULL); | 432 | vfp_enable(NULL); |
427 | 433 | ||
428 | /* and disable it to ensure the next usage restores the state */ | 434 | /* and disable it to ensure the next usage restores the state */ |
429 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 435 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
430 | } | 436 | } |
431 | 437 | ||
432 | static struct syscore_ops vfp_pm_syscore_ops = { | 438 | static struct syscore_ops vfp_pm_syscore_ops = { |
433 | .suspend = vfp_pm_suspend, | 439 | .suspend = vfp_pm_suspend, |
434 | .resume = vfp_pm_resume, | 440 | .resume = vfp_pm_resume, |
435 | }; | 441 | }; |
436 | 442 | ||
437 | static void vfp_pm_init(void) | 443 | static void vfp_pm_init(void) |
438 | { | 444 | { |
439 | register_syscore_ops(&vfp_pm_syscore_ops); | 445 | register_syscore_ops(&vfp_pm_syscore_ops); |
440 | } | 446 | } |
441 | 447 | ||
442 | #else | 448 | #else |
443 | static inline void vfp_pm_init(void) { } | 449 | static inline void vfp_pm_init(void) { } |
444 | #endif /* CONFIG_PM */ | 450 | #endif /* CONFIG_PM */ |
445 | 451 | ||
446 | void vfp_sync_hwstate(struct thread_info *thread) | 452 | void vfp_sync_hwstate(struct thread_info *thread) |
447 | { | 453 | { |
448 | unsigned int cpu = get_cpu(); | 454 | unsigned int cpu = get_cpu(); |
449 | 455 | ||
450 | /* | 456 | /* |
451 | * If the thread we're interested in is the current owner of the | 457 | * If the thread we're interested in is the current owner of the |
452 | * hardware VFP state, then we need to save its state. | 458 | * hardware VFP state, then we need to save its state. |
453 | */ | 459 | */ |
454 | if (last_VFP_context[cpu] == &thread->vfpstate) { | 460 | if (vfp_current_hw_state[cpu] == &thread->vfpstate) { |
455 | u32 fpexc = fmrx(FPEXC); | 461 | u32 fpexc = fmrx(FPEXC); |
456 | 462 | ||
457 | /* | 463 | /* |
458 | * Save the last VFP state on this CPU. | 464 | * Save the last VFP state on this CPU. |
459 | */ | 465 | */ |
460 | fmxr(FPEXC, fpexc | FPEXC_EN); | 466 | fmxr(FPEXC, fpexc | FPEXC_EN); |
461 | vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); | 467 | vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); |
462 | fmxr(FPEXC, fpexc); | 468 | fmxr(FPEXC, fpexc); |
463 | } | 469 | } |
464 | 470 | ||
465 | put_cpu(); | 471 | put_cpu(); |
466 | } | 472 | } |
467 | 473 | ||
468 | void vfp_flush_hwstate(struct thread_info *thread) | 474 | void vfp_flush_hwstate(struct thread_info *thread) |
469 | { | 475 | { |
470 | unsigned int cpu = get_cpu(); | 476 | unsigned int cpu = get_cpu(); |
471 | 477 | ||
472 | /* | 478 | /* |
473 | * If the thread we're interested in is the current owner of the | 479 | * If the thread we're interested in is the current owner of the |
474 | * hardware VFP state, then we need to save its state. | 480 | * hardware VFP state, then we need to save its state. |
475 | */ | 481 | */ |
476 | if (last_VFP_context[cpu] == &thread->vfpstate) { | 482 | if (vfp_current_hw_state[cpu] == &thread->vfpstate) { |
477 | u32 fpexc = fmrx(FPEXC); | 483 | u32 fpexc = fmrx(FPEXC); |
478 | 484 | ||
479 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | 485 | fmxr(FPEXC, fpexc & ~FPEXC_EN); |
480 | 486 | ||
481 | /* | 487 | /* |
482 | * Set the context to NULL to force a reload the next time | 488 | * Set the context to NULL to force a reload the next time |
483 | * the thread uses the VFP. | 489 | * the thread uses the VFP. |
484 | */ | 490 | */ |
485 | last_VFP_context[cpu] = NULL; | 491 | vfp_current_hw_state[cpu] = NULL; |
486 | } | 492 | } |
487 | 493 | ||
488 | #ifdef CONFIG_SMP | 494 | #ifdef CONFIG_SMP |
489 | /* | 495 | /* |
490 | * For SMP we still have to take care of the case where the thread | 496 | * For SMP we still have to take care of the case where the thread |
491 | * migrates to another CPU and then back to the original CPU on which | 497 | * migrates to another CPU and then back to the original CPU on which |
492 | * the last VFP user is still the same thread. Mark the thread VFP | 498 | * the last VFP user is still the same thread. Mark the thread VFP |
493 | * state as belonging to a non-existent CPU so that the saved one will | 499 | * state as belonging to a non-existent CPU so that the saved one will |
494 | * be reloaded in the above case. | 500 | * be reloaded in the above case. |
495 | */ | 501 | */ |
496 | thread->vfpstate.hard.cpu = NR_CPUS; | 502 | thread->vfpstate.hard.cpu = NR_CPUS; |
497 | #endif | 503 | #endif |
498 | put_cpu(); | 504 | put_cpu(); |
499 | } | 505 | } |
500 | 506 | ||
501 | /* | 507 | /* |
502 | * VFP hardware can lose all context when a CPU goes offline. | 508 | * VFP hardware can lose all context when a CPU goes offline. |
503 | * As we will be running in SMP mode with CPU hotplug, we will save the | 509 | * As we will be running in SMP mode with CPU hotplug, we will save the |
504 | * hardware state at every thread switch. We clear our held state when | 510 | * hardware state at every thread switch. We clear our held state when |
505 | * a CPU has been killed, indicating that the VFP hardware doesn't contain | 511 | * a CPU has been killed, indicating that the VFP hardware doesn't contain |
506 | * a threads VFP state. When a CPU starts up, we re-enable access to the | 512 | * a threads VFP state. When a CPU starts up, we re-enable access to the |
507 | * VFP hardware. | 513 | * VFP hardware. |
508 | * | 514 | * |
509 | * Both CPU_DYING and CPU_STARTING are called on the CPU which | 515 | * Both CPU_DYING and CPU_STARTING are called on the CPU which |
510 | * is being offlined/onlined. | 516 | * is being offlined/onlined. |
511 | */ | 517 | */ |
512 | static int vfp_hotplug(struct notifier_block *b, unsigned long action, | 518 | static int vfp_hotplug(struct notifier_block *b, unsigned long action, |
513 | void *hcpu) | 519 | void *hcpu) |
514 | { | 520 | { |
515 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { | 521 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { |
516 | unsigned int cpu = (long)hcpu; | 522 | unsigned int cpu = (long)hcpu; |
517 | last_VFP_context[cpu] = NULL; | 523 | vfp_current_hw_state[cpu] = NULL; |
518 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 524 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
519 | vfp_enable(NULL); | 525 | vfp_enable(NULL); |
520 | return NOTIFY_OK; | 526 | return NOTIFY_OK; |
521 | } | 527 | } |
522 | 528 | ||
523 | /* | 529 | /* |
524 | * VFP support code initialisation. | 530 | * VFP support code initialisation. |
525 | */ | 531 | */ |
526 | static int __init vfp_init(void) | 532 | static int __init vfp_init(void) |
527 | { | 533 | { |
528 | unsigned int vfpsid; | 534 | unsigned int vfpsid; |
529 | unsigned int cpu_arch = cpu_architecture(); | 535 | unsigned int cpu_arch = cpu_architecture(); |
530 | 536 | ||
531 | if (cpu_arch >= CPU_ARCH_ARMv6) | 537 | if (cpu_arch >= CPU_ARCH_ARMv6) |
532 | vfp_enable(NULL); | 538 | vfp_enable(NULL); |
533 | 539 | ||
534 | /* | 540 | /* |
535 | * First check that there is a VFP that we can use. | 541 | * First check that there is a VFP that we can use. |
536 | * The handler is already setup to just log calls, so | 542 | * The handler is already setup to just log calls, so |
537 | * we just need to read the VFPSID register. | 543 | * we just need to read the VFPSID register. |
538 | */ | 544 | */ |
539 | vfp_vector = vfp_testing_entry; | 545 | vfp_vector = vfp_testing_entry; |
540 | barrier(); | 546 | barrier(); |
541 | vfpsid = fmrx(FPSID); | 547 | vfpsid = fmrx(FPSID); |
542 | barrier(); | 548 | barrier(); |
543 | vfp_vector = vfp_null_entry; | 549 | vfp_vector = vfp_null_entry; |
544 | 550 | ||
545 | printk(KERN_INFO "VFP support v0.3: "); | 551 | printk(KERN_INFO "VFP support v0.3: "); |
546 | if (VFP_arch) | 552 | if (VFP_arch) |
547 | printk("not present\n"); | 553 | printk("not present\n"); |
548 | else if (vfpsid & FPSID_NODOUBLE) { | 554 | else if (vfpsid & FPSID_NODOUBLE) { |
549 | printk("no double precision support\n"); | 555 | printk("no double precision support\n"); |
550 | } else { | 556 | } else { |
551 | hotcpu_notifier(vfp_hotplug, 0); | 557 | hotcpu_notifier(vfp_hotplug, 0); |
552 | 558 | ||
553 | smp_call_function(vfp_enable, NULL, 1); | 559 | smp_call_function(vfp_enable, NULL, 1); |
554 | 560 | ||
555 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ | 561 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ |
556 | printk("implementor %02x architecture %d part %02x variant %x rev %x\n", | 562 | printk("implementor %02x architecture %d part %02x variant %x rev %x\n", |
557 | (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, | 563 | (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, |
558 | (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, | 564 | (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, |
559 | (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, | 565 | (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, |
560 | (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, | 566 | (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, |
561 | (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); | 567 | (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); |
562 | 568 | ||
563 | vfp_vector = vfp_support_entry; | 569 | vfp_vector = vfp_support_entry; |
564 | 570 | ||
565 | thread_register_notifier(&vfp_notifier_block); | 571 | thread_register_notifier(&vfp_notifier_block); |
566 | vfp_pm_init(); | 572 | vfp_pm_init(); |
567 | 573 | ||
568 | /* | 574 | /* |
569 | * We detected VFP, and the support code is | 575 | * We detected VFP, and the support code is |
570 | * in place; report VFP support to userspace. | 576 | * in place; report VFP support to userspace. |
571 | */ | 577 | */ |
572 | elf_hwcap |= HWCAP_VFP; | 578 | elf_hwcap |= HWCAP_VFP; |
573 | #ifdef CONFIG_VFPv3 | 579 | #ifdef CONFIG_VFPv3 |
574 | if (VFP_arch >= 2) { | 580 | if (VFP_arch >= 2) { |
575 | elf_hwcap |= HWCAP_VFPv3; | 581 | elf_hwcap |= HWCAP_VFPv3; |
576 | 582 | ||
577 | /* | 583 | /* |
578 | * Check for VFPv3 D16. CPUs in this configuration | 584 | * Check for VFPv3 D16. CPUs in this configuration |
579 | * only have 16 x 64bit registers. | 585 | * only have 16 x 64bit registers. |
580 | */ | 586 | */ |
581 | if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) | 587 | if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) |
582 | elf_hwcap |= HWCAP_VFPv3D16; | 588 | elf_hwcap |= HWCAP_VFPv3D16; |
583 | } | 589 | } |
584 | #endif | 590 | #endif |
585 | #ifdef CONFIG_NEON | 591 | #ifdef CONFIG_NEON |
586 | /* | 592 | /* |
587 | * Check for the presence of the Advanced SIMD | 593 | * Check for the presence of the Advanced SIMD |
588 | * load/store instructions, integer and single | 594 | * load/store instructions, integer and single |
589 | * precision floating point operations. Only check | 595 | * precision floating point operations. Only check |
590 | * for NEON if the hardware has the MVFR registers. | 596 | * for NEON if the hardware has the MVFR registers. |
591 | */ | 597 | */ |
592 | if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { | 598 | if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { |
593 | if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) | 599 | if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) |
594 | elf_hwcap |= HWCAP_NEON; | 600 | elf_hwcap |= HWCAP_NEON; |
595 | } | 601 | } |
596 | #endif | 602 | #endif |
597 | } | 603 | } |
598 | return 0; | 604 | return 0; |
599 | } | 605 | } |
600 | 606 | ||
601 | late_initcall(vfp_init); | 607 | late_initcall(vfp_init); |
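One closing observation, not part of the commit: every path in vfpmodule.c that makes the hardware contents untrustworthy (vfp_pm_suspend, the CPU_DYING hotplug case, vfp_flush_hwstate) simply clears the per-CPU pointer, so the next trap into vfp_support_entry reloads the thread's saved state. A one-line sketch of that idiom, with a hypothetical helper name:

    /* Hypothetical helper illustrating the invalidation idiom used above. */
    static inline void vfp_invalidate_hw_state(unsigned int cpu)
    {
            vfp_current_hw_state[cpu] = NULL;   /* force a reload on next VFP use */
    }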