Commit a0a55682b83fd5f012afadcf415b030d7424ae68
Exists in master and in 7 other branches
Merge branch 'hotplug' into devel
Conflicts: arch/arm/kernel/head-common.S
Showing 29 changed files
- arch/arm/kernel/head-common.S
- arch/arm/kernel/head-nommu.S
- arch/arm/kernel/head.S
- arch/arm/kernel/smp.c
- arch/arm/kernel/vmlinux.lds.S
- arch/arm/mm/proc-arm1020.S
- arch/arm/mm/proc-arm1020e.S
- arch/arm/mm/proc-arm1022.S
- arch/arm/mm/proc-arm1026.S
- arch/arm/mm/proc-arm6_7.S
- arch/arm/mm/proc-arm720.S
- arch/arm/mm/proc-arm740.S
- arch/arm/mm/proc-arm7tdmi.S
- arch/arm/mm/proc-arm920.S
- arch/arm/mm/proc-arm922.S
- arch/arm/mm/proc-arm925.S
- arch/arm/mm/proc-arm926.S
- arch/arm/mm/proc-arm940.S
- arch/arm/mm/proc-arm946.S
- arch/arm/mm/proc-arm9tdmi.S
- arch/arm/mm/proc-fa526.S
- arch/arm/mm/proc-feroceon.S
- arch/arm/mm/proc-mohawk.S
- arch/arm/mm/proc-sa110.S
- arch/arm/mm/proc-sa1100.S
- arch/arm/mm/proc-v6.S
- arch/arm/mm/proc-v7.S
- arch/arm/mm/proc-xsc3.S
- arch/arm/mm/proc-xscale.S
arch/arm/kernel/head-common.S
... | ... | @@ -15,56 +15,7 @@ |
15 | 15 | #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) |
16 | 16 | #define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2) |
17 | 17 | |
18 | - .align 2 | |
19 | - .type __switch_data, %object | |
20 | -__switch_data: | |
21 | - .long __mmap_switched | |
22 | - .long __data_loc @ r4 | |
23 | - .long _sdata @ r5 | |
24 | - .long __bss_start @ r6 | |
25 | - .long _end @ r7 | |
26 | - .long processor_id @ r4 | |
27 | - .long __machine_arch_type @ r5 | |
28 | - .long __atags_pointer @ r6 | |
29 | - .long cr_alignment @ r7 | |
30 | - .long init_thread_union + THREAD_START_SP @ sp | |
31 | - | |
32 | 18 | /* |
33 | - * The following fragment of code is executed with the MMU on in MMU mode, | |
34 | - * and uses absolute addresses; this is not position independent. | |
35 | - * | |
36 | - * r0 = cp#15 control register | |
37 | - * r1 = machine ID | |
38 | - * r2 = atags pointer | |
39 | - * r9 = processor ID | |
40 | - */ | |
41 | -__mmap_switched: | |
42 | - adr r3, __switch_data + 4 | |
43 | - | |
44 | - ldmia r3!, {r4, r5, r6, r7} | |
45 | - cmp r4, r5 @ Copy data segment if needed | |
46 | -1: cmpne r5, r6 | |
47 | - ldrne fp, [r4], #4 | |
48 | - strne fp, [r5], #4 | |
49 | - bne 1b | |
50 | - | |
51 | - mov fp, #0 @ Clear BSS (and zero fp) | |
52 | -1: cmp r6, r7 | |
53 | - strcc fp, [r6],#4 | |
54 | - bcc 1b | |
55 | - | |
56 | - ARM( ldmia r3, {r4, r5, r6, r7, sp}) | |
57 | - THUMB( ldmia r3, {r4, r5, r6, r7} ) | |
58 | - THUMB( ldr sp, [r3, #16] ) | |
59 | - str r9, [r4] @ Save processor ID | |
60 | - str r1, [r5] @ Save machine type | |
61 | - str r2, [r6] @ Save atags pointer | |
62 | - bic r4, r0, #CR_A @ Clear 'A' bit | |
63 | - stmia r7, {r0, r4} @ Save control register values | |
64 | - b start_kernel | |
65 | -ENDPROC(__mmap_switched) | |
66 | - | |
67 | -/* | |
68 | 19 | * Exception handling. Something went wrong and we can't proceed. We |
69 | 20 | * ought to tell the user, but since we don't have any guarantee that |
70 | 21 | * we're even running on the right architecture, we do virtually nothing. |
... | ... | @@ -73,21 +24,7 @@ |
73 | 24 | * and hope for the best (useful if bootloader fails to pass a proper |
74 | 25 | * machine ID for example). |
75 | 26 | */ |
76 | -__error_p: | |
77 | -#ifdef CONFIG_DEBUG_LL | |
78 | - adr r0, str_p1 | |
79 | - bl printascii | |
80 | - mov r0, r9 | |
81 | - bl printhex8 | |
82 | - adr r0, str_p2 | |
83 | - bl printascii | |
84 | - b __error | |
85 | -str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" | |
86 | -str_p2: .asciz ").\n" | |
87 | - .align | |
88 | -#endif | |
89 | -ENDPROC(__error_p) | |
90 | - | |
27 | + __HEAD | |
91 | 28 | __error_a: |
92 | 29 | #ifdef CONFIG_DEBUG_LL |
93 | 30 | mov r4, r1 @ preserve machine ID |
... | ... | @@ -97,7 +34,7 @@ |
97 | 34 | bl printhex8 |
98 | 35 | adr r0, str_a2 |
99 | 36 | bl printascii |
100 | - adr r3, 4f | |
37 | + adr r3, __lookup_machine_type_data | |
101 | 38 | ldmia r3, {r4, r5, r6} @ get machine desc list |
102 | 39 | sub r4, r3, r4 @ get offset between virt&phys |
103 | 40 | add r5, r5, r4 @ convert virt addresses to |
104 | 41 | |
... | ... | @@ -125,79 +62,7 @@ |
125 | 62 | .align |
126 | 63 | #endif |
127 | 64 | |
128 | -__error: | |
129 | -#ifdef CONFIG_ARCH_RPC | |
130 | 65 | /* |
131 | - * Turn the screen red on a error - RiscPC only. | |
132 | - */ | |
133 | - mov r0, #0x02000000 | |
134 | - mov r3, #0x11 | |
135 | - orr r3, r3, r3, lsl #8 | |
136 | - orr r3, r3, r3, lsl #16 | |
137 | - str r3, [r0], #4 | |
138 | - str r3, [r0], #4 | |
139 | - str r3, [r0], #4 | |
140 | - str r3, [r0], #4 | |
141 | -#endif | |
142 | -1: mov r0, r0 | |
143 | - b 1b | |
144 | -ENDPROC(__error) | |
145 | - | |
146 | - | |
147 | -/* | |
148 | - * Read processor ID register (CP#15, CR0), and look up in the linker-built | |
149 | - * supported processor list. Note that we can't use the absolute addresses | |
150 | - * for the __proc_info lists since we aren't running with the MMU on | |
151 | - * (and therefore, we are not in the correct address space). We have to | |
152 | - * calculate the offset. | |
153 | - * | |
154 | - * r9 = cpuid | |
155 | - * Returns: | |
156 | - * r3, r4, r6 corrupted | |
157 | - * r5 = proc_info pointer in physical address space | |
158 | - * r9 = cpuid (preserved) | |
159 | - */ | |
160 | -__lookup_processor_type: | |
161 | - adr r3, 3f | |
162 | - ldmia r3, {r5 - r7} | |
163 | - add r3, r3, #8 | |
164 | - sub r3, r3, r7 @ get offset between virt&phys | |
165 | - add r5, r5, r3 @ convert virt addresses to | |
166 | - add r6, r6, r3 @ physical address space | |
167 | -1: ldmia r5, {r3, r4} @ value, mask | |
168 | - and r4, r4, r9 @ mask wanted bits | |
169 | - teq r3, r4 | |
170 | - beq 2f | |
171 | - add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) | |
172 | - cmp r5, r6 | |
173 | - blo 1b | |
174 | - mov r5, #0 @ unknown processor | |
175 | -2: mov pc, lr | |
176 | -ENDPROC(__lookup_processor_type) | |
177 | - | |
178 | -/* | |
179 | - * This provides a C-API version of the above function. | |
180 | - */ | |
181 | -ENTRY(lookup_processor_type) | |
182 | - stmfd sp!, {r4 - r7, r9, lr} | |
183 | - mov r9, r0 | |
184 | - bl __lookup_processor_type | |
185 | - mov r0, r5 | |
186 | - ldmfd sp!, {r4 - r7, r9, pc} | |
187 | -ENDPROC(lookup_processor_type) | |
188 | - | |
189 | -/* | |
190 | - * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for | |
191 | - * more information about the __proc_info and __arch_info structures. | |
192 | - */ | |
193 | - .align 2 | |
194 | -3: .long __proc_info_begin | |
195 | - .long __proc_info_end | |
196 | -4: .long . | |
197 | - .long __arch_info_begin | |
198 | - .long __arch_info_end | |
199 | - | |
200 | -/* | |
201 | 66 | * Lookup machine architecture in the linker-build list of architectures. |
202 | 67 | * Note that we can't use the absolute addresses for the __arch_info |
203 | 68 | * lists since we aren't running with the MMU on (and therefore, we are |
... | ... | @@ -209,7 +74,7 @@ |
209 | 74 | * r5 = mach_info pointer in physical address space |
210 | 75 | */ |
211 | 76 | __lookup_machine_type: |
212 | - adr r3, 4b | |
77 | + adr r3, __lookup_machine_type_data | |
213 | 78 | ldmia r3, {r4, r5, r6} |
214 | 79 | sub r3, r3, r4 @ get offset between virt&phys |
215 | 80 | add r5, r5, r3 @ convert virt addresses to |
216 | 81 | |
... | ... | @@ -225,15 +90,16 @@ |
225 | 90 | ENDPROC(__lookup_machine_type) |
226 | 91 | |
227 | 92 | /* |
228 | - * This provides a C-API version of the above function. | |
93 | + * Look in arch/arm/kernel/arch.[ch] for information about the | |
94 | + * __arch_info structures. | |
229 | 95 | */ |
230 | -ENTRY(lookup_machine_type) | |
231 | - stmfd sp!, {r4 - r6, lr} | |
232 | - mov r1, r0 | |
233 | - bl __lookup_machine_type | |
234 | - mov r0, r5 | |
235 | - ldmfd sp!, {r4 - r6, pc} | |
236 | -ENDPROC(lookup_machine_type) | |
96 | + .align 2 | |
97 | + .type __lookup_machine_type_data, %object | |
98 | +__lookup_machine_type_data: | |
99 | + .long . | |
100 | + .long __arch_info_begin | |
101 | + .long __arch_info_end | |
102 | + .size __lookup_machine_type_data, . - __lookup_machine_type_data | |
237 | 103 | |
238 | 104 | /* Determine validity of the r2 atags pointer. The heuristic requires |
239 | 105 | * that the pointer be aligned, in the first 16k of physical RAM and |
... | ... | @@ -265,4 +131,151 @@ |
265 | 131 | 1: mov r2, #0 |
266 | 132 | mov pc, lr |
267 | 133 | ENDPROC(__vet_atags) |
134 | + | |
135 | +/* | |
136 | + * The following fragment of code is executed with the MMU on in MMU mode, | |
137 | + * and uses absolute addresses; this is not position independent. | |
138 | + * | |
139 | + * r0 = cp#15 control register | |
140 | + * r1 = machine ID | |
141 | + * r2 = atags pointer | |
142 | + * r9 = processor ID | |
143 | + */ | |
144 | + __INIT | |
145 | +__mmap_switched: | |
146 | + adr r3, __mmap_switched_data | |
147 | + | |
148 | + ldmia r3!, {r4, r5, r6, r7} | |
149 | + cmp r4, r5 @ Copy data segment if needed | |
150 | +1: cmpne r5, r6 | |
151 | + ldrne fp, [r4], #4 | |
152 | + strne fp, [r5], #4 | |
153 | + bne 1b | |
154 | + | |
155 | + mov fp, #0 @ Clear BSS (and zero fp) | |
156 | +1: cmp r6, r7 | |
157 | + strcc fp, [r6],#4 | |
158 | + bcc 1b | |
159 | + | |
160 | + ARM( ldmia r3, {r4, r5, r6, r7, sp}) | |
161 | + THUMB( ldmia r3, {r4, r5, r6, r7} ) | |
162 | + THUMB( ldr sp, [r3, #16] ) | |
163 | + str r9, [r4] @ Save processor ID | |
164 | + str r1, [r5] @ Save machine type | |
165 | + str r2, [r6] @ Save atags pointer | |
166 | + bic r4, r0, #CR_A @ Clear 'A' bit | |
167 | + stmia r7, {r0, r4} @ Save control register values | |
168 | + b start_kernel | |
169 | +ENDPROC(__mmap_switched) | |
170 | + | |
171 | + .align 2 | |
172 | + .type __mmap_switched_data, %object | |
173 | +__mmap_switched_data: | |
174 | + .long __data_loc @ r4 | |
175 | + .long _sdata @ r5 | |
176 | + .long __bss_start @ r6 | |
177 | + .long _end @ r7 | |
178 | + .long processor_id @ r4 | |
179 | + .long __machine_arch_type @ r5 | |
180 | + .long __atags_pointer @ r6 | |
181 | + .long cr_alignment @ r7 | |
182 | + .long init_thread_union + THREAD_START_SP @ sp | |
183 | + .size __mmap_switched_data, . - __mmap_switched_data | |
184 | + | |
185 | +/* | |
186 | + * This provides a C-API version of __lookup_machine_type | |
187 | + */ | |
188 | +ENTRY(lookup_machine_type) | |
189 | + stmfd sp!, {r4 - r6, lr} | |
190 | + mov r1, r0 | |
191 | + bl __lookup_machine_type | |
192 | + mov r0, r5 | |
193 | + ldmfd sp!, {r4 - r6, pc} | |
194 | +ENDPROC(lookup_machine_type) | |
195 | + | |
196 | +/* | |
197 | + * This provides a C-API version of __lookup_processor_type | |
198 | + */ | |
199 | +ENTRY(lookup_processor_type) | |
200 | + stmfd sp!, {r4 - r6, r9, lr} | |
201 | + mov r9, r0 | |
202 | + bl __lookup_processor_type | |
203 | + mov r0, r5 | |
204 | + ldmfd sp!, {r4 - r6, r9, pc} | |
205 | +ENDPROC(lookup_processor_type) | |
206 | + | |
207 | +/* | |
208 | + * Read processor ID register (CP#15, CR0), and look up in the linker-built | |
209 | + * supported processor list. Note that we can't use the absolute addresses | |
210 | + * for the __proc_info lists since we aren't running with the MMU on | |
211 | + * (and therefore, we are not in the correct address space). We have to | |
212 | + * calculate the offset. | |
213 | + * | |
214 | + * r9 = cpuid | |
215 | + * Returns: | |
216 | + * r3, r4, r6 corrupted | |
217 | + * r5 = proc_info pointer in physical address space | |
218 | + * r9 = cpuid (preserved) | |
219 | + */ | |
220 | + __CPUINIT | |
221 | +__lookup_processor_type: | |
222 | + adr r3, __lookup_processor_type_data | |
223 | + ldmia r3, {r4 - r6} | |
224 | + sub r3, r3, r4 @ get offset between virt&phys | |
225 | + add r5, r5, r3 @ convert virt addresses to | |
226 | + add r6, r6, r3 @ physical address space | |
227 | +1: ldmia r5, {r3, r4} @ value, mask | |
228 | + and r4, r4, r9 @ mask wanted bits | |
229 | + teq r3, r4 | |
230 | + beq 2f | |
231 | + add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) | |
232 | + cmp r5, r6 | |
233 | + blo 1b | |
234 | + mov r5, #0 @ unknown processor | |
235 | +2: mov pc, lr | |
236 | +ENDPROC(__lookup_processor_type) | |
237 | + | |
238 | +/* | |
239 | + * Look in <asm/procinfo.h> for information about the __proc_info structure. | |
240 | + */ | |
241 | + .align 2 | |
242 | + .type __lookup_processor_type_data, %object | |
243 | +__lookup_processor_type_data: | |
244 | + .long . | |
245 | + .long __proc_info_begin | |
246 | + .long __proc_info_end | |
247 | + .size __lookup_processor_type_data, . - __lookup_processor_type_data | |
248 | + | |
249 | +__error_p: | |
250 | +#ifdef CONFIG_DEBUG_LL | |
251 | + adr r0, str_p1 | |
252 | + bl printascii | |
253 | + mov r0, r9 | |
254 | + bl printhex8 | |
255 | + adr r0, str_p2 | |
256 | + bl printascii | |
257 | + b __error | |
258 | +str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" | |
259 | +str_p2: .asciz ").\n" | |
260 | + .align | |
261 | +#endif | |
262 | +ENDPROC(__error_p) | |
263 | + | |
264 | +__error: | |
265 | +#ifdef CONFIG_ARCH_RPC | |
266 | +/* | |
267 | + * Turn the screen red on a error - RiscPC only. | |
268 | + */ | |
269 | + mov r0, #0x02000000 | |
270 | + mov r3, #0x11 | |
271 | + orr r3, r3, r3, lsl #8 | |
272 | + orr r3, r3, r3, lsl #16 | |
273 | + str r3, [r0], #4 | |
274 | + str r3, [r0], #4 | |
275 | + str r3, [r0], #4 | |
276 | + str r3, [r0], #4 | |
277 | +#endif | |
278 | +1: mov r0, r0 | |
279 | + b 1b | |
280 | +ENDPROC(__error) |
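
The consolidated lookup code above still relies on the usual position-independent data trick: each descriptor table opens with a ".long ." entry holding its own link-time address, and the code subtracts that from the table's run-time address to obtain the virt-to-phys delta before the MMU is on. A minimal C sketch of that arithmetic, for illustration only (the function name and array layout are assumptions, not kernel API):

/*
 * Illustrative sketch of the offset computation used by
 * __lookup_machine_type_data and __lookup_processor_type_data above.
 * table[0] is the ".long ." link-time (virtual) address of the table;
 * the table's run-time address is physical because the MMU is still off,
 * so the difference converts the virtual begin/end pointers that follow
 * (e.g. __arch_info_begin/__arch_info_end) into physical addresses.
 */
static unsigned long phys_list_begin(const unsigned long table[3])
{
	unsigned long delta = (unsigned long)table - table[0];	/* phys - virt */

	return table[1] + delta;	/* physical address of the list start */
}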
arch/arm/kernel/head-nommu.S
... | ... | @@ -48,8 +48,6 @@ |
48 | 48 | movs r8, r5 @ invalid machine (r5=0)? |
49 | 49 | beq __error_a @ yes, error 'a' |
50 | 50 | |
51 | - ldr r13, __switch_data @ address to jump to after | |
52 | - @ the initialization is done | |
53 | 51 | adr lr, BSYM(__after_proc_init) @ return (PIC) address |
54 | 52 | ARM( add pc, r10, #PROCINFO_INITFUNC ) |
55 | 53 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) |
... | ... | @@ -87,8 +85,7 @@ |
87 | 85 | mcr p15, 0, r0, c1, c0, 0 @ write control reg |
88 | 86 | #endif /* CONFIG_CPU_CP15 */ |
89 | 87 | |
90 | - mov r3, r13 | |
91 | - mov pc, r3 @ clear the BSS and jump | |
88 | + b __mmap_switched @ clear the BSS and jump | |
92 | 89 | @ to start_kernel |
93 | 90 | ENDPROC(__after_proc_init) |
94 | 91 | .ltorg |
arch/arm/kernel/head.S
... | ... | @@ -98,115 +98,17 @@ |
98 | 98 | * above. On return, the CPU will be ready for the MMU to be |
99 | 99 | * turned on, and r0 will hold the CPU control register value. |
100 | 100 | */ |
101 | - ldr r13, __switch_data @ address to jump to after | |
101 | + ldr r13, =__mmap_switched @ address to jump to after | |
102 | 102 | @ mmu has been enabled |
103 | - adr lr, BSYM(__enable_mmu) @ return (PIC) address | |
103 | + adr lr, BSYM(1f) @ return (PIC) address | |
104 | 104 | ARM( add pc, r10, #PROCINFO_INITFUNC ) |
105 | 105 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) |
106 | 106 | THUMB( mov pc, r12 ) |
107 | +1: b __enable_mmu | |
107 | 108 | ENDPROC(stext) |
109 | + .ltorg | |
108 | 110 | |
109 | -#if defined(CONFIG_SMP) | |
110 | -ENTRY(secondary_startup) | |
111 | - /* | |
112 | - * Common entry point for secondary CPUs. | |
113 | - * | |
114 | - * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | |
115 | - * the processor type - there is no need to check the machine type | |
116 | - * as it has already been validated by the primary processor. | |
117 | - */ | |
118 | - setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | |
119 | - mrc p15, 0, r9, c0, c0 @ get processor id | |
120 | - bl __lookup_processor_type | |
121 | - movs r10, r5 @ invalid processor? | |
122 | - moveq r0, #'p' @ yes, error 'p' | |
123 | - beq __error | |
124 | - | |
125 | - /* | |
126 | - * Use the page tables supplied from __cpu_up. | |
127 | - */ | |
128 | - adr r4, __secondary_data | |
129 | - ldmia r4, {r5, r7, r12} @ address to jump to after | |
130 | - sub r4, r4, r5 @ mmu has been enabled | |
131 | - ldr r4, [r7, r4] @ get secondary_data.pgdir | |
132 | - adr lr, BSYM(__enable_mmu) @ return address | |
133 | - mov r13, r12 @ __secondary_switched address | |
134 | - ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor | |
135 | - @ (return control reg) | |
136 | - THUMB( add r12, r10, #PROCINFO_INITFUNC ) | |
137 | - THUMB( mov pc, r12 ) | |
138 | -ENDPROC(secondary_startup) | |
139 | - | |
140 | - /* | |
141 | - * r6 = &secondary_data | |
142 | - */ | |
143 | -ENTRY(__secondary_switched) | |
144 | - ldr sp, [r7, #4] @ get secondary_data.stack | |
145 | - mov fp, #0 | |
146 | - b secondary_start_kernel | |
147 | -ENDPROC(__secondary_switched) | |
148 | - | |
149 | - .type __secondary_data, %object | |
150 | -__secondary_data: | |
151 | - .long . | |
152 | - .long secondary_data | |
153 | - .long __secondary_switched | |
154 | -#endif /* defined(CONFIG_SMP) */ | |
155 | - | |
156 | - | |
157 | - | |
158 | 111 | /* |
159 | - * Setup common bits before finally enabling the MMU. Essentially | |
160 | - * this is just loading the page table pointer and domain access | |
161 | - * registers. | |
162 | - */ | |
163 | -__enable_mmu: | |
164 | -#ifdef CONFIG_ALIGNMENT_TRAP | |
165 | - orr r0, r0, #CR_A | |
166 | -#else | |
167 | - bic r0, r0, #CR_A | |
168 | -#endif | |
169 | -#ifdef CONFIG_CPU_DCACHE_DISABLE | |
170 | - bic r0, r0, #CR_C | |
171 | -#endif | |
172 | -#ifdef CONFIG_CPU_BPREDICT_DISABLE | |
173 | - bic r0, r0, #CR_Z | |
174 | -#endif | |
175 | -#ifdef CONFIG_CPU_ICACHE_DISABLE | |
176 | - bic r0, r0, #CR_I | |
177 | -#endif | |
178 | - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | |
179 | - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | |
180 | - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | |
181 | - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | |
182 | - mcr p15, 0, r5, c3, c0, 0 @ load domain access register | |
183 | - mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | |
184 | - b __turn_mmu_on | |
185 | -ENDPROC(__enable_mmu) | |
186 | - | |
187 | -/* | |
188 | - * Enable the MMU. This completely changes the structure of the visible | |
189 | - * memory space. You will not be able to trace execution through this. | |
190 | - * If you have an enquiry about this, *please* check the linux-arm-kernel | |
191 | - * mailing list archives BEFORE sending another post to the list. | |
192 | - * | |
193 | - * r0 = cp#15 control register | |
194 | - * r13 = *virtual* address to jump to upon completion | |
195 | - * | |
196 | - * other registers depend on the function called upon completion | |
197 | - */ | |
198 | - .align 5 | |
199 | -__turn_mmu_on: | |
200 | - mov r0, r0 | |
201 | - mcr p15, 0, r0, c1, c0, 0 @ write control reg | |
202 | - mrc p15, 0, r3, c0, c0, 0 @ read id reg | |
203 | - mov r3, r3 | |
204 | - mov r3, r13 | |
205 | - mov pc, r3 | |
206 | -ENDPROC(__turn_mmu_on) | |
207 | - | |
208 | - | |
209 | -/* | |
210 | 112 | * Setup the initial page tables. We only setup the barest |
211 | 113 | * amount which are required to get the kernel running, which |
212 | 114 | * generally means mapping in the kernel code. |
... | ... | @@ -216,7 +118,7 @@ |
216 | 118 | * r10 = procinfo |
217 | 119 | * |
218 | 120 | * Returns: |
219 | - * r0, r3, r6, r7 corrupted | |
121 | + * r0, r3, r5-r7 corrupted | |
220 | 122 | * r4 = physical page table address |
221 | 123 | */ |
222 | 124 | __create_page_tables: |
223 | 125 | |
224 | 126 | |
225 | 127 | |
... | ... | @@ -238,20 +140,30 @@ |
238 | 140 | ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags |
239 | 141 | |
240 | 142 | /* |
241 | - * Create identity mapping for first MB of kernel to | |
242 | - * cater for the MMU enable. This identity mapping | |
243 | - * will be removed by paging_init(). We use our current program | |
244 | - * counter to determine corresponding section base address. | |
143 | + * Create identity mapping to cater for __enable_mmu. | |
144 | + * This identity mapping will be removed by paging_init(). | |
245 | 145 | */ |
246 | - mov r6, pc | |
247 | - mov r6, r6, lsr #20 @ start of kernel section | |
248 | - orr r3, r7, r6, lsl #20 @ flags + kernel base | |
249 | - str r3, [r4, r6, lsl #2] @ identity mapping | |
146 | + adr r0, __enable_mmu_loc | |
147 | + ldmia r0, {r3, r5, r6} | |
148 | + sub r0, r0, r3 @ virt->phys offset | |
149 | + add r5, r5, r0 @ phys __enable_mmu | |
150 | + add r6, r6, r0 @ phys __enable_mmu_end | |
151 | + mov r5, r5, lsr #20 | |
152 | + mov r6, r6, lsr #20 | |
250 | 153 | |
154 | +1: orr r3, r7, r5, lsl #20 @ flags + kernel base | |
155 | + str r3, [r4, r5, lsl #2] @ identity mapping | |
156 | + teq r5, r6 | |
157 | + addne r5, r5, #1 @ next section | |
158 | + bne 1b | |
159 | + | |
251 | 160 | /* |
252 | 161 | * Now setup the pagetables for our kernel direct |
253 | 162 | * mapped region. |
254 | 163 | */ |
164 | + mov r3, pc | |
165 | + mov r3, r3, lsr #20 | |
166 | + orr r3, r7, r3, lsl #20 | |
255 | 167 | add r0, r4, #(KERNEL_START & 0xff000000) >> 18 |
256 | 168 | str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]! |
257 | 169 | ldr r6, =(KERNEL_END - 1) |
... | ... | @@ -335,6 +247,122 @@ |
335 | 247 | mov pc, lr |
336 | 248 | ENDPROC(__create_page_tables) |
337 | 249 | .ltorg |
250 | +__enable_mmu_loc: | |
251 | + .long . | |
252 | + .long __enable_mmu | |
253 | + .long __enable_mmu_end | |
254 | + | |
255 | +#if defined(CONFIG_SMP) | |
256 | + __CPUINIT | |
257 | +ENTRY(secondary_startup) | |
258 | + /* | |
259 | + * Common entry point for secondary CPUs. | |
260 | + * | |
261 | + * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | |
262 | + * the processor type - there is no need to check the machine type | |
263 | + * as it has already been validated by the primary processor. | |
264 | + */ | |
265 | + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | |
266 | + mrc p15, 0, r9, c0, c0 @ get processor id | |
267 | + bl __lookup_processor_type | |
268 | + movs r10, r5 @ invalid processor? | |
269 | + moveq r0, #'p' @ yes, error 'p' | |
270 | + beq __error_p | |
271 | + | |
272 | + /* | |
273 | + * Use the page tables supplied from __cpu_up. | |
274 | + */ | |
275 | + adr r4, __secondary_data | |
276 | + ldmia r4, {r5, r7, r12} @ address to jump to after | |
277 | + sub r4, r4, r5 @ mmu has been enabled | |
278 | + ldr r4, [r7, r4] @ get secondary_data.pgdir | |
279 | + adr lr, BSYM(__enable_mmu) @ return address | |
280 | + mov r13, r12 @ __secondary_switched address | |
281 | + ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor | |
282 | + @ (return control reg) | |
283 | + THUMB( add r12, r10, #PROCINFO_INITFUNC ) | |
284 | + THUMB( mov pc, r12 ) | |
285 | +ENDPROC(secondary_startup) | |
286 | + | |
287 | + /* | |
288 | + * r6 = &secondary_data | |
289 | + */ | |
290 | +ENTRY(__secondary_switched) | |
291 | + ldr sp, [r7, #4] @ get secondary_data.stack | |
292 | + mov fp, #0 | |
293 | + b secondary_start_kernel | |
294 | +ENDPROC(__secondary_switched) | |
295 | + | |
296 | + .type __secondary_data, %object | |
297 | +__secondary_data: | |
298 | + .long . | |
299 | + .long secondary_data | |
300 | + .long __secondary_switched | |
301 | +#endif /* defined(CONFIG_SMP) */ | |
302 | + | |
303 | + | |
304 | + | |
305 | +/* | |
306 | + * Setup common bits before finally enabling the MMU. Essentially | |
307 | + * this is just loading the page table pointer and domain access | |
308 | + * registers. | |
309 | + * | |
310 | + * r0 = cp#15 control register | |
311 | + * r1 = machine ID | |
312 | + * r2 = atags pointer | |
313 | + * r4 = page table pointer | |
314 | + * r9 = processor ID | |
315 | + * r13 = *virtual* address to jump to upon completion | |
316 | + */ | |
317 | +__enable_mmu: | |
318 | +#ifdef CONFIG_ALIGNMENT_TRAP | |
319 | + orr r0, r0, #CR_A | |
320 | +#else | |
321 | + bic r0, r0, #CR_A | |
322 | +#endif | |
323 | +#ifdef CONFIG_CPU_DCACHE_DISABLE | |
324 | + bic r0, r0, #CR_C | |
325 | +#endif | |
326 | +#ifdef CONFIG_CPU_BPREDICT_DISABLE | |
327 | + bic r0, r0, #CR_Z | |
328 | +#endif | |
329 | +#ifdef CONFIG_CPU_ICACHE_DISABLE | |
330 | + bic r0, r0, #CR_I | |
331 | +#endif | |
332 | + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | |
333 | + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | |
334 | + domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | |
335 | + domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | |
336 | + mcr p15, 0, r5, c3, c0, 0 @ load domain access register | |
337 | + mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | |
338 | + b __turn_mmu_on | |
339 | +ENDPROC(__enable_mmu) | |
340 | + | |
341 | +/* | |
342 | + * Enable the MMU. This completely changes the structure of the visible | |
343 | + * memory space. You will not be able to trace execution through this. | |
344 | + * If you have an enquiry about this, *please* check the linux-arm-kernel | |
345 | + * mailing list archives BEFORE sending another post to the list. | |
346 | + * | |
347 | + * r0 = cp#15 control register | |
348 | + * r1 = machine ID | |
349 | + * r2 = atags pointer | |
350 | + * r9 = processor ID | |
351 | + * r13 = *virtual* address to jump to upon completion | |
352 | + * | |
353 | + * other registers depend on the function called upon completion | |
354 | + */ | |
355 | + .align 5 | |
356 | +__turn_mmu_on: | |
357 | + mov r0, r0 | |
358 | + mcr p15, 0, r0, c1, c0, 0 @ write control reg | |
359 | + mrc p15, 0, r3, c0, c0, 0 @ read id reg | |
360 | + mov r3, r3 | |
361 | + mov r3, r13 | |
362 | + mov pc, r3 | |
363 | +__enable_mmu_end: | |
364 | +ENDPROC(__turn_mmu_on) | |
365 | + | |
338 | 366 | |
339 | 367 | #ifdef CONFIG_SMP_ON_UP |
340 | 368 | __fixup_smp: |
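
The reworked identity mapping in __create_page_tables above now covers every 1MB section from __enable_mmu to __enable_mmu_end, so only the MMU-enabling code is mapped 1:1, instead of the single section containing the current program counter. A rough C rendering of that loop, for illustration only; the parameter names are invented and the page table is modelled as an array of 32-bit section descriptors:

/*
 * Rough C rendering of the new identity-mapping loop.  Every 1MB section
 * from the physical address of __enable_mmu up to and including the one
 * holding __enable_mmu_end gets a first-level descriptor whose virtual
 * address equals its physical address (flags from PROCINFO_MM_MMUFLAGS).
 */
static void map_identity_sections(unsigned long *pgd, unsigned long mmuflags,
				  unsigned long phys_start, unsigned long phys_end)
{
	unsigned long sect;

	for (sect = phys_start >> 20; sect <= phys_end >> 20; sect++)
		pgd[sect] = (sect << 20) | mmuflags;	/* 1:1 section entry */
}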
arch/arm/kernel/smp.c
... | ... | @@ -33,6 +33,7 @@ |
33 | 33 | #include <asm/pgtable.h> |
34 | 34 | #include <asm/pgalloc.h> |
35 | 35 | #include <asm/processor.h> |
36 | +#include <asm/sections.h> | |
36 | 37 | #include <asm/tlbflush.h> |
37 | 38 | #include <asm/ptrace.h> |
38 | 39 | #include <asm/localtimer.h> |
39 | 40 | |
... | ... | @@ -67,12 +68,47 @@ |
67 | 68 | IPI_CPU_STOP, |
68 | 69 | }; |
69 | 70 | |
71 | +static inline void identity_mapping_add(pgd_t *pgd, unsigned long start, | |
72 | + unsigned long end) | |
73 | +{ | |
74 | + unsigned long addr, prot; | |
75 | + pmd_t *pmd; | |
76 | + | |
77 | + prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; | |
78 | + if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | |
79 | + prot |= PMD_BIT4; | |
80 | + | |
81 | + for (addr = start & PGDIR_MASK; addr < end;) { | |
82 | + pmd = pmd_offset(pgd + pgd_index(addr), addr); | |
83 | + pmd[0] = __pmd(addr | prot); | |
84 | + addr += SECTION_SIZE; | |
85 | + pmd[1] = __pmd(addr | prot); | |
86 | + addr += SECTION_SIZE; | |
87 | + flush_pmd_entry(pmd); | |
88 | + outer_clean_range(__pa(pmd), __pa(pmd + 1)); | |
89 | + } | |
90 | +} | |
91 | + | |
92 | +static inline void identity_mapping_del(pgd_t *pgd, unsigned long start, | |
93 | + unsigned long end) | |
94 | +{ | |
95 | + unsigned long addr; | |
96 | + pmd_t *pmd; | |
97 | + | |
98 | + for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) { | |
99 | + pmd = pmd_offset(pgd + pgd_index(addr), addr); | |
100 | + pmd[0] = __pmd(0); | |
101 | + pmd[1] = __pmd(0); | |
102 | + clean_pmd_entry(pmd); | |
103 | + outer_clean_range(__pa(pmd), __pa(pmd + 1)); | |
104 | + } | |
105 | +} | |
106 | + | |
70 | 107 | int __cpuinit __cpu_up(unsigned int cpu) |
71 | 108 | { |
72 | 109 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); |
73 | 110 | struct task_struct *idle = ci->idle; |
74 | 111 | pgd_t *pgd; |
75 | - pmd_t *pmd; | |
76 | 112 | int ret; |
77 | 113 | |
78 | 114 | /* |
79 | 115 | |
... | ... | @@ -101,12 +137,17 @@ |
101 | 137 | * a 1:1 mapping for the physical address of the kernel. |
102 | 138 | */ |
103 | 139 | pgd = pgd_alloc(&init_mm); |
104 | - pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET); | |
105 | - *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | | |
106 | - PMD_TYPE_SECT | PMD_SECT_AP_WRITE); | |
107 | - flush_pmd_entry(pmd); | |
108 | - outer_clean_range(__pa(pmd), __pa(pmd + 1)); | |
140 | + if (!pgd) | |
141 | + return -ENOMEM; | |
109 | 142 | |
143 | + if (PHYS_OFFSET != PAGE_OFFSET) { | |
144 | +#ifndef CONFIG_HOTPLUG_CPU | |
145 | + identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); | |
146 | +#endif | |
147 | + identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); | |
148 | + identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); | |
149 | + } | |
150 | + | |
110 | 151 | /* |
111 | 152 | * We need to tell the secondary core where to find |
112 | 153 | * its stack and the page tables. |
... | ... | @@ -143,8 +184,14 @@ |
143 | 184 | secondary_data.stack = NULL; |
144 | 185 | secondary_data.pgdir = 0; |
145 | 186 | |
146 | - *pmd = __pmd(0); | |
147 | - clean_pmd_entry(pmd); | |
187 | + if (PHYS_OFFSET != PAGE_OFFSET) { | |
188 | +#ifndef CONFIG_HOTPLUG_CPU | |
189 | + identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); | |
190 | +#endif | |
191 | + identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); | |
192 | + identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); | |
193 | + } | |
194 | + | |
148 | 195 | pgd_free(&init_mm, pgd); |
149 | 196 | |
150 | 197 | if (ret) { |
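
The new identity_mapping_add()/identity_mapping_del() helpers above replace the single hard-coded PHYS_OFFSET section mapping that __cpu_up() used before. A condensed sketch of the resulting bring-up window, assuming the same helpers; boot_one_secondary() is a hypothetical stand-in for the real wakeup sequence, and the real code additionally maps the init sections when CONFIG_HOTPLUG_CPU is not set:

/*
 * Sketch of the temporary 1:1 window around secondary CPU bring-up:
 * kernel text and data are identity-mapped only for as long as the
 * secondary CPU needs to turn its MMU on, then the mappings are removed.
 */
static int boot_with_identity_window(pgd_t *pgd, unsigned int cpu)
{
	int ret;

	identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
	identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));

	ret = boot_one_secondary(cpu);		/* hypothetical wakeup call */

	identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
	identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
	return ret;
}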
arch/arm/kernel/vmlinux.lds.S
... | ... | @@ -8,6 +8,19 @@ |
8 | 8 | #include <asm/memory.h> |
9 | 9 | #include <asm/page.h> |
10 | 10 | |
11 | +#define PROC_INFO \ | |
12 | + VMLINUX_SYMBOL(__proc_info_begin) = .; \ | |
13 | + *(.proc.info.init) \ | |
14 | + VMLINUX_SYMBOL(__proc_info_end) = .; | |
15 | + | |
16 | +#ifdef CONFIG_HOTPLUG_CPU | |
17 | +#define ARM_CPU_DISCARD(x) | |
18 | +#define ARM_CPU_KEEP(x) x | |
19 | +#else | |
20 | +#define ARM_CPU_DISCARD(x) x | |
21 | +#define ARM_CPU_KEEP(x) | |
22 | +#endif | |
23 | + | |
11 | 24 | OUTPUT_ARCH(arm) |
12 | 25 | ENTRY(stext) |
13 | 26 | |
... | ... | @@ -31,9 +44,7 @@ |
31 | 44 | HEAD_TEXT |
32 | 45 | INIT_TEXT |
33 | 46 | _einittext = .; |
34 | - __proc_info_begin = .; | |
35 | - *(.proc.info.init) | |
36 | - __proc_info_end = .; | |
47 | + ARM_CPU_DISCARD(PROC_INFO) | |
37 | 48 | __arch_info_begin = .; |
38 | 49 | *(.arch.info.init) |
39 | 50 | __arch_info_end = .; |
... | ... | @@ -73,10 +84,8 @@ |
73 | 84 | /DISCARD/ : { |
74 | 85 | *(.ARM.exidx.exit.text) |
75 | 86 | *(.ARM.extab.exit.text) |
76 | -#ifndef CONFIG_HOTPLUG_CPU | |
77 | - *(.ARM.exidx.cpuexit.text) | |
78 | - *(.ARM.extab.cpuexit.text) | |
79 | -#endif | |
87 | + ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) | |
88 | + ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) | |
80 | 89 | #ifndef CONFIG_HOTPLUG |
81 | 90 | *(.ARM.exidx.devexit.text) |
82 | 91 | *(.ARM.extab.devexit.text) |
... | ... | @@ -105,6 +114,7 @@ |
105 | 114 | *(.glue_7) |
106 | 115 | *(.glue_7t) |
107 | 116 | *(.got) /* Global offset table */ |
117 | + ARM_CPU_KEEP(PROC_INFO) | |
108 | 118 | } |
109 | 119 | |
110 | 120 | RO_DATA(PAGE_SIZE) |
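
The ARM_CPU_KEEP()/ARM_CPU_DISCARD() pair introduced above decides where the processor info tables live. The reasoning, spelled out as comments on the same definitions (a restatement of the hunk above, not new code):

/*
 * With CONFIG_HOTPLUG_CPU the __proc_info tables must survive
 * free_initmem(), because a CPU coming back online runs
 * __lookup_processor_type again; PROC_INFO is therefore emitted via
 * ARM_CPU_KEEP() into the permanent text section.  Without hotplug the
 * tables are only needed at boot, so ARM_CPU_DISCARD() leaves them in
 * init text, which is freed after boot.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif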
arch/arm/mm/proc-arm1020.S
arch/arm/mm/proc-arm1020e.S
arch/arm/mm/proc-arm1022.S
arch/arm/mm/proc-arm1026.S
arch/arm/mm/proc-arm6_7.S
arch/arm/mm/proc-arm720.S
arch/arm/mm/proc-arm740.S
arch/arm/mm/proc-arm7tdmi.S
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm922.S
arch/arm/mm/proc-arm925.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-arm940.S
arch/arm/mm/proc-arm946.S
arch/arm/mm/proc-arm9tdmi.S
arch/arm/mm/proc-fa526.S
arch/arm/mm/proc-feroceon.S
arch/arm/mm/proc-mohawk.S
arch/arm/mm/proc-sa110.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-v6.S
... | ... | @@ -135,7 +135,7 @@ |
135 | 135 | |
136 | 136 | .align |
137 | 137 | |
138 | - __INIT | |
138 | + __CPUINIT | |
139 | 139 | |
140 | 140 | /* |
141 | 141 | * __v6_setup |
... | ... | @@ -193,6 +193,8 @@ |
193 | 193 | v6_crval: |
194 | 194 | crval clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c |
195 | 195 | |
196 | + __INITDATA | |
197 | + | |
196 | 198 | .type v6_processor_functions, #object |
197 | 199 | ENTRY(v6_processor_functions) |
198 | 200 | .word v6_early_abort |
... | ... | @@ -205,6 +207,8 @@ |
205 | 207 | .word cpu_v6_switch_mm |
206 | 208 | .word cpu_v6_set_pte_ext |
207 | 209 | .size v6_processor_functions, . - v6_processor_functions |
210 | + | |
211 | + .section ".rodata" | |
208 | 212 | |
209 | 213 | .type cpu_arch_name, #object |
210 | 214 | cpu_arch_name: |
arch/arm/mm/proc-v7.S
... | ... | @@ -168,7 +168,7 @@ |
168 | 168 | .ascii "ARMv7 Processor" |
169 | 169 | .align |
170 | 170 | |
171 | - __INIT | |
171 | + __CPUINIT | |
172 | 172 | |
173 | 173 | /* |
174 | 174 | * __v7_setup |
... | ... | @@ -325,6 +325,8 @@ |
325 | 325 | __v7_setup_stack: |
326 | 326 | .space 4 * 11 @ 11 registers |
327 | 327 | |
328 | + __INITDATA | |
329 | + | |
328 | 330 | .type v7_processor_functions, #object |
329 | 331 | ENTRY(v7_processor_functions) |
330 | 332 | .word v7_early_abort |
... | ... | @@ -337,6 +339,8 @@ |
337 | 339 | .word cpu_v7_switch_mm |
338 | 340 | .word cpu_v7_set_pte_ext |
339 | 341 | .size v7_processor_functions, . - v7_processor_functions |
342 | + | |
343 | + .section ".rodata" | |
340 | 344 | |
341 | 345 | .type cpu_arch_name, #object |
342 | 346 | cpu_arch_name: |
arch/arm/mm/proc-xsc3.S