Commit 0f7b332f9777819a39a3b325690379a7efef89d1
1 parent: 43b3e18982
Exists in master and in 39 other branches
ARM: consolidate SMP cross call implementation
Rather than having each platform class provide a mach/smp.h header for smp_cross_call(), arrange for them to register the function with the core ARM SMP code instead.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Showing 19 changed files with 37 additions and 162 deletions
- arch/arm/include/asm/smp.h
- arch/arm/kernel/smp.c
- arch/arm/mach-exynos4/include/mach/smp.h
- arch/arm/mach-exynos4/platsmp.c
- arch/arm/mach-msm/include/mach/smp.h
- arch/arm/mach-msm/platsmp.c
- arch/arm/mach-omap2/omap-smp.c
- arch/arm/mach-realview/include/mach/smp.h
- arch/arm/mach-realview/platsmp.c
- arch/arm/mach-shmobile/include/mach/smp.h
- arch/arm/mach-shmobile/platsmp.c
- arch/arm/mach-tegra/include/mach/smp.h
- arch/arm/mach-tegra/platsmp.c
- arch/arm/mach-ux500/include/mach/smp.h
- arch/arm/mach-ux500/platsmp.c
- arch/arm/mach-vexpress/ct-ca9x4.c
- arch/arm/mach-vexpress/include/mach/smp.h
- arch/arm/plat-omap/include/plat/smp.h
- arch/arm/plat-versatile/platsmp.c
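The mechanism behind the consolidation is small: the per-platform smp_cross_call() inline, previously pulled in through <mach/smp.h>, becomes a private function pointer in the core SMP code, and each platform registers its IPI-raising function once at boot. A minimal sketch of the core side, condensed from the arch/arm/kernel/smp.c hunk below:

    /* arch/arm/kernel/smp.c: the cross-call hook is now a private pointer */
    static void (*smp_cross_call)(const struct cpumask *, unsigned int);

    /* called once by the platform's smp_init_cpus() */
    void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
    {
            smp_cross_call = fn;
    }

All core users of the cross call (smp_send_reschedule(), smp_send_stop(), the clockevent broadcast, and the call-function IPIs) go through this pointer, so a platform must register it before any IPI can be raised.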
arch/arm/include/asm/smp.h
1 | /* | 1 | /* |
2 | * arch/arm/include/asm/smp.h | 2 | * arch/arm/include/asm/smp.h |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2005 ARM Ltd. | 4 | * Copyright (C) 2004-2005 ARM Ltd. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #ifndef __ASM_ARM_SMP_H | 10 | #ifndef __ASM_ARM_SMP_H |
11 | #define __ASM_ARM_SMP_H | 11 | #define __ASM_ARM_SMP_H |
12 | 12 | ||
13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | #include <linux/cpumask.h> | 14 | #include <linux/cpumask.h> |
15 | #include <linux/thread_info.h> | 15 | #include <linux/thread_info.h> |
16 | 16 | ||
17 | #include <mach/smp.h> | ||
18 | |||
19 | #ifndef CONFIG_SMP | 17 | #ifndef CONFIG_SMP |
20 | # error "<asm/smp.h> included in non-SMP build" | 18 | # error "<asm/smp.h> included in non-SMP build" |
21 | #endif | 19 | #endif |
22 | 20 | ||
23 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 21 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
24 | 22 | ||
25 | /* | 23 | /* |
26 | * at the moment, there's not a big penalty for changing CPUs | 24 | * at the moment, there's not a big penalty for changing CPUs |
27 | * (the >big< penalty is running SMP in the first place) | 25 | * (the >big< penalty is running SMP in the first place) |
28 | */ | 26 | */ |
29 | #define PROC_CHANGE_PENALTY 15 | 27 | #define PROC_CHANGE_PENALTY 15 |
30 | 28 | ||
31 | struct seq_file; | 29 | struct seq_file; |
32 | 30 | ||
33 | /* | 31 | /* |
34 | * generate IPI list text | 32 | * generate IPI list text |
35 | */ | 33 | */ |
36 | extern void show_ipi_list(struct seq_file *, int); | 34 | extern void show_ipi_list(struct seq_file *, int); |
37 | 35 | ||
38 | /* | 36 | /* |
39 | * Called from assembly code, this handles an IPI. | 37 | * Called from assembly code, this handles an IPI. |
40 | */ | 38 | */ |
41 | asmlinkage void do_IPI(int ipinr, struct pt_regs *regs); | 39 | asmlinkage void do_IPI(int ipinr, struct pt_regs *regs); |
42 | 40 | ||
43 | /* | 41 | /* |
44 | * Setup the set of possible CPUs (via set_cpu_possible) | 42 | * Setup the set of possible CPUs (via set_cpu_possible) |
45 | */ | 43 | */ |
46 | extern void smp_init_cpus(void); | 44 | extern void smp_init_cpus(void); |
47 | 45 | ||
48 | 46 | ||
49 | /* | 47 | /* |
50 | * Raise an IPI cross call on CPUs in callmap. | 48 | * Provide a function to raise an IPI cross call on CPUs in callmap. |
51 | */ | 49 | */ |
52 | extern void smp_cross_call(const struct cpumask *mask, int ipi); | 50 | extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); |
53 | 51 | ||
54 | /* | 52 | /* |
55 | * Boot a secondary CPU, and assign it the specified idle task. | 53 | * Boot a secondary CPU, and assign it the specified idle task. |
56 | * This also gives us the initial stack to use for this CPU. | 54 | * This also gives us the initial stack to use for this CPU. |
57 | */ | 55 | */ |
58 | extern int boot_secondary(unsigned int cpu, struct task_struct *); | 56 | extern int boot_secondary(unsigned int cpu, struct task_struct *); |
59 | 57 | ||
60 | /* | 58 | /* |
61 | * Called from platform specific assembly code, this is the | 59 | * Called from platform specific assembly code, this is the |
62 | * secondary CPU entry point. | 60 | * secondary CPU entry point. |
63 | */ | 61 | */ |
64 | asmlinkage void secondary_start_kernel(void); | 62 | asmlinkage void secondary_start_kernel(void); |
65 | 63 | ||
66 | /* | 64 | /* |
67 | * Perform platform specific initialisation of the specified CPU. | 65 | * Perform platform specific initialisation of the specified CPU. |
68 | */ | 66 | */ |
69 | extern void platform_secondary_init(unsigned int cpu); | 67 | extern void platform_secondary_init(unsigned int cpu); |
70 | 68 | ||
71 | /* | 69 | /* |
72 | * Initialize cpu_possible map, and enable coherency | 70 | * Initialize cpu_possible map, and enable coherency |
73 | */ | 71 | */ |
74 | extern void platform_smp_prepare_cpus(unsigned int); | 72 | extern void platform_smp_prepare_cpus(unsigned int); |
75 | 73 | ||
76 | /* | 74 | /* |
77 | * Initial data for bringing up a secondary CPU. | 75 | * Initial data for bringing up a secondary CPU. |
78 | */ | 76 | */ |
79 | struct secondary_data { | 77 | struct secondary_data { |
80 | unsigned long pgdir; | 78 | unsigned long pgdir; |
81 | void *stack; | 79 | void *stack; |
82 | }; | 80 | }; |
83 | extern struct secondary_data secondary_data; | 81 | extern struct secondary_data secondary_data; |
84 | 82 | ||
85 | extern int __cpu_disable(void); | 83 | extern int __cpu_disable(void); |
86 | extern int platform_cpu_disable(unsigned int cpu); | 84 | extern int platform_cpu_disable(unsigned int cpu); |
87 | 85 | ||
88 | extern void __cpu_die(unsigned int cpu); | 86 | extern void __cpu_die(unsigned int cpu); |
89 | extern void cpu_die(void); | 87 | extern void cpu_die(void); |
90 | 88 | ||
91 | extern void platform_cpu_die(unsigned int cpu); | 89 | extern void platform_cpu_die(unsigned int cpu); |
92 | extern int platform_cpu_kill(unsigned int cpu); | 90 | extern int platform_cpu_kill(unsigned int cpu); |
93 | extern void platform_cpu_enable(unsigned int cpu); | 91 | extern void platform_cpu_enable(unsigned int cpu); |
94 | 92 | ||
95 | extern void arch_send_call_function_single_ipi(int cpu); | 93 | extern void arch_send_call_function_single_ipi(int cpu); |
96 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 94 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
97 | 95 | ||
98 | /* | 96 | /* |
99 | * show local interrupt info | 97 | * show local interrupt info |
100 | */ | 98 | */ |
101 | extern void show_local_irqs(struct seq_file *, int); | 99 | extern void show_local_irqs(struct seq_file *, int); |
102 | 100 | ||
103 | #endif /* ifndef __ASM_ARM_SMP_H */ | 101 | #endif /* ifndef __ASM_ARM_SMP_H */ |
104 | 102 |
arch/arm/kernel/smp.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/kernel/smp.c | 2 | * linux/arch/arm/kernel/smp.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. | 4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/cache.h> | 16 | #include <linux/cache.h> |
17 | #include <linux/profile.h> | 17 | #include <linux/profile.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <linux/ftrace.h> | 19 | #include <linux/ftrace.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/percpu.h> | 26 | #include <linux/percpu.h> |
27 | #include <linux/clockchips.h> | 27 | #include <linux/clockchips.h> |
28 | #include <linux/completion.h> | 28 | #include <linux/completion.h> |
29 | 29 | ||
30 | #include <asm/atomic.h> | 30 | #include <asm/atomic.h> |
31 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
32 | #include <asm/cpu.h> | 32 | #include <asm/cpu.h> |
33 | #include <asm/cputype.h> | 33 | #include <asm/cputype.h> |
34 | #include <asm/mmu_context.h> | 34 | #include <asm/mmu_context.h> |
35 | #include <asm/pgtable.h> | 35 | #include <asm/pgtable.h> |
36 | #include <asm/pgalloc.h> | 36 | #include <asm/pgalloc.h> |
37 | #include <asm/processor.h> | 37 | #include <asm/processor.h> |
38 | #include <asm/sections.h> | 38 | #include <asm/sections.h> |
39 | #include <asm/tlbflush.h> | 39 | #include <asm/tlbflush.h> |
40 | #include <asm/ptrace.h> | 40 | #include <asm/ptrace.h> |
41 | #include <asm/localtimer.h> | 41 | #include <asm/localtimer.h> |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * as from 2.5, kernels no longer have an init_tasks structure | 44 | * as from 2.5, kernels no longer have an init_tasks structure |
45 | * so we need some other way of telling a new secondary core | 45 | * so we need some other way of telling a new secondary core |
46 | * where to place its SVC stack | 46 | * where to place its SVC stack |
47 | */ | 47 | */ |
48 | struct secondary_data secondary_data; | 48 | struct secondary_data secondary_data; |
49 | 49 | ||
50 | enum ipi_msg_type { | 50 | enum ipi_msg_type { |
51 | IPI_TIMER = 2, | 51 | IPI_TIMER = 2, |
52 | IPI_RESCHEDULE, | 52 | IPI_RESCHEDULE, |
53 | IPI_CALL_FUNC, | 53 | IPI_CALL_FUNC, |
54 | IPI_CALL_FUNC_SINGLE, | 54 | IPI_CALL_FUNC_SINGLE, |
55 | IPI_CPU_STOP, | 55 | IPI_CPU_STOP, |
56 | }; | 56 | }; |
57 | 57 | ||
58 | int __cpuinit __cpu_up(unsigned int cpu) | 58 | int __cpuinit __cpu_up(unsigned int cpu) |
59 | { | 59 | { |
60 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); | 60 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); |
61 | struct task_struct *idle = ci->idle; | 61 | struct task_struct *idle = ci->idle; |
62 | pgd_t *pgd; | 62 | pgd_t *pgd; |
63 | int ret; | 63 | int ret; |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Spawn a new process manually, if not already done. | 66 | * Spawn a new process manually, if not already done. |
67 | * Grab a pointer to its task struct so we can mess with it | 67 | * Grab a pointer to its task struct so we can mess with it |
68 | */ | 68 | */ |
69 | if (!idle) { | 69 | if (!idle) { |
70 | idle = fork_idle(cpu); | 70 | idle = fork_idle(cpu); |
71 | if (IS_ERR(idle)) { | 71 | if (IS_ERR(idle)) { |
72 | printk(KERN_ERR "CPU%u: fork() failed\n", cpu); | 72 | printk(KERN_ERR "CPU%u: fork() failed\n", cpu); |
73 | return PTR_ERR(idle); | 73 | return PTR_ERR(idle); |
74 | } | 74 | } |
75 | ci->idle = idle; | 75 | ci->idle = idle; |
76 | } else { | 76 | } else { |
77 | /* | 77 | /* |
78 | * Since this idle thread is being re-used, call | 78 | * Since this idle thread is being re-used, call |
79 | * init_idle() to reinitialize the thread structure. | 79 | * init_idle() to reinitialize the thread structure. |
80 | */ | 80 | */ |
81 | init_idle(idle, cpu); | 81 | init_idle(idle, cpu); |
82 | } | 82 | } |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * Allocate initial page tables to allow the new CPU to | 85 | * Allocate initial page tables to allow the new CPU to |
86 | * enable the MMU safely. This essentially means a set | 86 | * enable the MMU safely. This essentially means a set |
87 | * of our "standard" page tables, with the addition of | 87 | * of our "standard" page tables, with the addition of |
88 | * a 1:1 mapping for the physical address of the kernel. | 88 | * a 1:1 mapping for the physical address of the kernel. |
89 | */ | 89 | */ |
90 | pgd = pgd_alloc(&init_mm); | 90 | pgd = pgd_alloc(&init_mm); |
91 | if (!pgd) | 91 | if (!pgd) |
92 | return -ENOMEM; | 92 | return -ENOMEM; |
93 | 93 | ||
94 | if (PHYS_OFFSET != PAGE_OFFSET) { | 94 | if (PHYS_OFFSET != PAGE_OFFSET) { |
95 | #ifndef CONFIG_HOTPLUG_CPU | 95 | #ifndef CONFIG_HOTPLUG_CPU |
96 | identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); | 96 | identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); |
97 | #endif | 97 | #endif |
98 | identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); | 98 | identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); |
99 | identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); | 99 | identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); |
100 | } | 100 | } |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * We need to tell the secondary core where to find | 103 | * We need to tell the secondary core where to find |
104 | * its stack and the page tables. | 104 | * its stack and the page tables. |
105 | */ | 105 | */ |
106 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; | 106 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; |
107 | secondary_data.pgdir = virt_to_phys(pgd); | 107 | secondary_data.pgdir = virt_to_phys(pgd); |
108 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); | 108 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); |
109 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); | 109 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Now bring the CPU into our world. | 112 | * Now bring the CPU into our world. |
113 | */ | 113 | */ |
114 | ret = boot_secondary(cpu, idle); | 114 | ret = boot_secondary(cpu, idle); |
115 | if (ret == 0) { | 115 | if (ret == 0) { |
116 | unsigned long timeout; | 116 | unsigned long timeout; |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * CPU was successfully started, wait for it | 119 | * CPU was successfully started, wait for it |
120 | * to come online or time out. | 120 | * to come online or time out. |
121 | */ | 121 | */ |
122 | timeout = jiffies + HZ; | 122 | timeout = jiffies + HZ; |
123 | while (time_before(jiffies, timeout)) { | 123 | while (time_before(jiffies, timeout)) { |
124 | if (cpu_online(cpu)) | 124 | if (cpu_online(cpu)) |
125 | break; | 125 | break; |
126 | 126 | ||
127 | udelay(10); | 127 | udelay(10); |
128 | barrier(); | 128 | barrier(); |
129 | } | 129 | } |
130 | 130 | ||
131 | if (!cpu_online(cpu)) { | 131 | if (!cpu_online(cpu)) { |
132 | pr_crit("CPU%u: failed to come online\n", cpu); | 132 | pr_crit("CPU%u: failed to come online\n", cpu); |
133 | ret = -EIO; | 133 | ret = -EIO; |
134 | } | 134 | } |
135 | } else { | 135 | } else { |
136 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); | 136 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); |
137 | } | 137 | } |
138 | 138 | ||
139 | secondary_data.stack = NULL; | 139 | secondary_data.stack = NULL; |
140 | secondary_data.pgdir = 0; | 140 | secondary_data.pgdir = 0; |
141 | 141 | ||
142 | if (PHYS_OFFSET != PAGE_OFFSET) { | 142 | if (PHYS_OFFSET != PAGE_OFFSET) { |
143 | #ifndef CONFIG_HOTPLUG_CPU | 143 | #ifndef CONFIG_HOTPLUG_CPU |
144 | identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); | 144 | identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); |
145 | #endif | 145 | #endif |
146 | identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); | 146 | identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); |
147 | identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); | 147 | identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); |
148 | } | 148 | } |
149 | 149 | ||
150 | pgd_free(&init_mm, pgd); | 150 | pgd_free(&init_mm, pgd); |
151 | 151 | ||
152 | return ret; | 152 | return ret; |
153 | } | 153 | } |
154 | 154 | ||
155 | #ifdef CONFIG_HOTPLUG_CPU | 155 | #ifdef CONFIG_HOTPLUG_CPU |
156 | static void percpu_timer_stop(void); | 156 | static void percpu_timer_stop(void); |
157 | 157 | ||
158 | /* | 158 | /* |
159 | * __cpu_disable runs on the processor to be shutdown. | 159 | * __cpu_disable runs on the processor to be shutdown. |
160 | */ | 160 | */ |
161 | int __cpu_disable(void) | 161 | int __cpu_disable(void) |
162 | { | 162 | { |
163 | unsigned int cpu = smp_processor_id(); | 163 | unsigned int cpu = smp_processor_id(); |
164 | struct task_struct *p; | 164 | struct task_struct *p; |
165 | int ret; | 165 | int ret; |
166 | 166 | ||
167 | ret = platform_cpu_disable(cpu); | 167 | ret = platform_cpu_disable(cpu); |
168 | if (ret) | 168 | if (ret) |
169 | return ret; | 169 | return ret; |
170 | 170 | ||
171 | /* | 171 | /* |
172 | * Take this CPU offline. Once we clear this, we can't return, | 172 | * Take this CPU offline. Once we clear this, we can't return, |
173 | * and we must not schedule until we're ready to give up the cpu. | 173 | * and we must not schedule until we're ready to give up the cpu. |
174 | */ | 174 | */ |
175 | set_cpu_online(cpu, false); | 175 | set_cpu_online(cpu, false); |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * OK - migrate IRQs away from this CPU | 178 | * OK - migrate IRQs away from this CPU |
179 | */ | 179 | */ |
180 | migrate_irqs(); | 180 | migrate_irqs(); |
181 | 181 | ||
182 | /* | 182 | /* |
183 | * Stop the local timer for this CPU. | 183 | * Stop the local timer for this CPU. |
184 | */ | 184 | */ |
185 | percpu_timer_stop(); | 185 | percpu_timer_stop(); |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * Flush user cache and TLB mappings, and then remove this CPU | 188 | * Flush user cache and TLB mappings, and then remove this CPU |
189 | * from the vm mask set of all processes. | 189 | * from the vm mask set of all processes. |
190 | */ | 190 | */ |
191 | flush_cache_all(); | 191 | flush_cache_all(); |
192 | local_flush_tlb_all(); | 192 | local_flush_tlb_all(); |
193 | 193 | ||
194 | read_lock(&tasklist_lock); | 194 | read_lock(&tasklist_lock); |
195 | for_each_process(p) { | 195 | for_each_process(p) { |
196 | if (p->mm) | 196 | if (p->mm) |
197 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); | 197 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); |
198 | } | 198 | } |
199 | read_unlock(&tasklist_lock); | 199 | read_unlock(&tasklist_lock); |
200 | 200 | ||
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
203 | 203 | ||
204 | static DECLARE_COMPLETION(cpu_died); | 204 | static DECLARE_COMPLETION(cpu_died); |
205 | 205 | ||
206 | /* | 206 | /* |
207 | * called on the thread which is asking for a CPU to be shutdown - | 207 | * called on the thread which is asking for a CPU to be shutdown - |
208 | * waits until shutdown has completed, or it is timed out. | 208 | * waits until shutdown has completed, or it is timed out. |
209 | */ | 209 | */ |
210 | void __cpu_die(unsigned int cpu) | 210 | void __cpu_die(unsigned int cpu) |
211 | { | 211 | { |
212 | if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { | 212 | if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { |
213 | pr_err("CPU%u: cpu didn't die\n", cpu); | 213 | pr_err("CPU%u: cpu didn't die\n", cpu); |
214 | return; | 214 | return; |
215 | } | 215 | } |
216 | printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); | 216 | printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); |
217 | 217 | ||
218 | if (!platform_cpu_kill(cpu)) | 218 | if (!platform_cpu_kill(cpu)) |
219 | printk("CPU%u: unable to kill\n", cpu); | 219 | printk("CPU%u: unable to kill\n", cpu); |
220 | } | 220 | } |
221 | 221 | ||
222 | /* | 222 | /* |
223 | * Called from the idle thread for the CPU which has been shutdown. | 223 | * Called from the idle thread for the CPU which has been shutdown. |
224 | * | 224 | * |
225 | * Note that we disable IRQs here, but do not re-enable them | 225 | * Note that we disable IRQs here, but do not re-enable them |
226 | * before returning to the caller. This is also the behaviour | 226 | * before returning to the caller. This is also the behaviour |
227 | * of the other hotplug-cpu capable cores, so presumably coming | 227 | * of the other hotplug-cpu capable cores, so presumably coming |
228 | * out of idle fixes this. | 228 | * out of idle fixes this. |
229 | */ | 229 | */ |
230 | void __ref cpu_die(void) | 230 | void __ref cpu_die(void) |
231 | { | 231 | { |
232 | unsigned int cpu = smp_processor_id(); | 232 | unsigned int cpu = smp_processor_id(); |
233 | 233 | ||
234 | idle_task_exit(); | 234 | idle_task_exit(); |
235 | 235 | ||
236 | local_irq_disable(); | 236 | local_irq_disable(); |
237 | mb(); | 237 | mb(); |
238 | 238 | ||
239 | /* Tell __cpu_die() that this CPU is now safe to dispose of */ | 239 | /* Tell __cpu_die() that this CPU is now safe to dispose of */ |
240 | complete(&cpu_died); | 240 | complete(&cpu_died); |
241 | 241 | ||
242 | /* | 242 | /* |
243 | * actual CPU shutdown procedure is at least platform (if not | 243 | * actual CPU shutdown procedure is at least platform (if not |
244 | * CPU) specific. | 244 | * CPU) specific. |
245 | */ | 245 | */ |
246 | platform_cpu_die(cpu); | 246 | platform_cpu_die(cpu); |
247 | 247 | ||
248 | /* | 248 | /* |
249 | * Do not return to the idle loop - jump back to the secondary | 249 | * Do not return to the idle loop - jump back to the secondary |
250 | * cpu initialisation. There's some initialisation which needs | 250 | * cpu initialisation. There's some initialisation which needs |
251 | * to be repeated to undo the effects of taking the CPU offline. | 251 | * to be repeated to undo the effects of taking the CPU offline. |
252 | */ | 252 | */ |
253 | __asm__("mov sp, %0\n" | 253 | __asm__("mov sp, %0\n" |
254 | " mov fp, #0\n" | 254 | " mov fp, #0\n" |
255 | " b secondary_start_kernel" | 255 | " b secondary_start_kernel" |
256 | : | 256 | : |
257 | : "r" (task_stack_page(current) + THREAD_SIZE - 8)); | 257 | : "r" (task_stack_page(current) + THREAD_SIZE - 8)); |
258 | } | 258 | } |
259 | #endif /* CONFIG_HOTPLUG_CPU */ | 259 | #endif /* CONFIG_HOTPLUG_CPU */ |
260 | 260 | ||
261 | /* | 261 | /* |
262 | * Called by both boot and secondaries to move global data into | 262 | * Called by both boot and secondaries to move global data into |
263 | * per-processor storage. | 263 | * per-processor storage. |
264 | */ | 264 | */ |
265 | static void __cpuinit smp_store_cpu_info(unsigned int cpuid) | 265 | static void __cpuinit smp_store_cpu_info(unsigned int cpuid) |
266 | { | 266 | { |
267 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); | 267 | struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); |
268 | 268 | ||
269 | cpu_info->loops_per_jiffy = loops_per_jiffy; | 269 | cpu_info->loops_per_jiffy = loops_per_jiffy; |
270 | } | 270 | } |
271 | 271 | ||
272 | /* | 272 | /* |
273 | * This is the secondary CPU boot entry. We're using this CPUs | 273 | * This is the secondary CPU boot entry. We're using this CPUs |
274 | * idle thread stack, but a set of temporary page tables. | 274 | * idle thread stack, but a set of temporary page tables. |
275 | */ | 275 | */ |
276 | asmlinkage void __cpuinit secondary_start_kernel(void) | 276 | asmlinkage void __cpuinit secondary_start_kernel(void) |
277 | { | 277 | { |
278 | struct mm_struct *mm = &init_mm; | 278 | struct mm_struct *mm = &init_mm; |
279 | unsigned int cpu = smp_processor_id(); | 279 | unsigned int cpu = smp_processor_id(); |
280 | 280 | ||
281 | printk("CPU%u: Booted secondary processor\n", cpu); | 281 | printk("CPU%u: Booted secondary processor\n", cpu); |
282 | 282 | ||
283 | /* | 283 | /* |
284 | * All kernel threads share the same mm context; grab a | 284 | * All kernel threads share the same mm context; grab a |
285 | * reference and switch to it. | 285 | * reference and switch to it. |
286 | */ | 286 | */ |
287 | atomic_inc(&mm->mm_count); | 287 | atomic_inc(&mm->mm_count); |
288 | current->active_mm = mm; | 288 | current->active_mm = mm; |
289 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | 289 | cpumask_set_cpu(cpu, mm_cpumask(mm)); |
290 | cpu_switch_mm(mm->pgd, mm); | 290 | cpu_switch_mm(mm->pgd, mm); |
291 | enter_lazy_tlb(mm, current); | 291 | enter_lazy_tlb(mm, current); |
292 | local_flush_tlb_all(); | 292 | local_flush_tlb_all(); |
293 | 293 | ||
294 | cpu_init(); | 294 | cpu_init(); |
295 | preempt_disable(); | 295 | preempt_disable(); |
296 | trace_hardirqs_off(); | 296 | trace_hardirqs_off(); |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * Give the platform a chance to do its own initialisation. | 299 | * Give the platform a chance to do its own initialisation. |
300 | */ | 300 | */ |
301 | platform_secondary_init(cpu); | 301 | platform_secondary_init(cpu); |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * Enable local interrupts. | 304 | * Enable local interrupts. |
305 | */ | 305 | */ |
306 | notify_cpu_starting(cpu); | 306 | notify_cpu_starting(cpu); |
307 | local_irq_enable(); | 307 | local_irq_enable(); |
308 | local_fiq_enable(); | 308 | local_fiq_enable(); |
309 | 309 | ||
310 | /* | 310 | /* |
311 | * Setup the percpu timer for this CPU. | 311 | * Setup the percpu timer for this CPU. |
312 | */ | 312 | */ |
313 | percpu_timer_setup(); | 313 | percpu_timer_setup(); |
314 | 314 | ||
315 | calibrate_delay(); | 315 | calibrate_delay(); |
316 | 316 | ||
317 | smp_store_cpu_info(cpu); | 317 | smp_store_cpu_info(cpu); |
318 | 318 | ||
319 | /* | 319 | /* |
320 | * OK, now it's safe to let the boot CPU continue | 320 | * OK, now it's safe to let the boot CPU continue |
321 | */ | 321 | */ |
322 | set_cpu_online(cpu, true); | 322 | set_cpu_online(cpu, true); |
323 | 323 | ||
324 | /* | 324 | /* |
325 | * OK, it's off to the idle thread for us | 325 | * OK, it's off to the idle thread for us |
326 | */ | 326 | */ |
327 | cpu_idle(); | 327 | cpu_idle(); |
328 | } | 328 | } |
329 | 329 | ||
330 | void __init smp_cpus_done(unsigned int max_cpus) | 330 | void __init smp_cpus_done(unsigned int max_cpus) |
331 | { | 331 | { |
332 | int cpu; | 332 | int cpu; |
333 | unsigned long bogosum = 0; | 333 | unsigned long bogosum = 0; |
334 | 334 | ||
335 | for_each_online_cpu(cpu) | 335 | for_each_online_cpu(cpu) |
336 | bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; | 336 | bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; |
337 | 337 | ||
338 | printk(KERN_INFO "SMP: Total of %d processors activated " | 338 | printk(KERN_INFO "SMP: Total of %d processors activated " |
339 | "(%lu.%02lu BogoMIPS).\n", | 339 | "(%lu.%02lu BogoMIPS).\n", |
340 | num_online_cpus(), | 340 | num_online_cpus(), |
341 | bogosum / (500000/HZ), | 341 | bogosum / (500000/HZ), |
342 | (bogosum / (5000/HZ)) % 100); | 342 | (bogosum / (5000/HZ)) % 100); |
343 | } | 343 | } |
344 | 344 | ||
345 | void __init smp_prepare_boot_cpu(void) | 345 | void __init smp_prepare_boot_cpu(void) |
346 | { | 346 | { |
347 | unsigned int cpu = smp_processor_id(); | 347 | unsigned int cpu = smp_processor_id(); |
348 | 348 | ||
349 | per_cpu(cpu_data, cpu).idle = current; | 349 | per_cpu(cpu_data, cpu).idle = current; |
350 | } | 350 | } |
351 | 351 | ||
352 | void __init smp_prepare_cpus(unsigned int max_cpus) | 352 | void __init smp_prepare_cpus(unsigned int max_cpus) |
353 | { | 353 | { |
354 | unsigned int ncores = num_possible_cpus(); | 354 | unsigned int ncores = num_possible_cpus(); |
355 | 355 | ||
356 | smp_store_cpu_info(smp_processor_id()); | 356 | smp_store_cpu_info(smp_processor_id()); |
357 | 357 | ||
358 | /* | 358 | /* |
359 | * are we trying to boot more cores than exist? | 359 | * are we trying to boot more cores than exist? |
360 | */ | 360 | */ |
361 | if (max_cpus > ncores) | 361 | if (max_cpus > ncores) |
362 | max_cpus = ncores; | 362 | max_cpus = ncores; |
363 | 363 | ||
364 | if (max_cpus > 1) { | 364 | if (max_cpus > 1) { |
365 | /* | 365 | /* |
366 | * Enable the local timer or broadcast device for the | 366 | * Enable the local timer or broadcast device for the |
367 | * boot CPU, but only if we have more than one CPU. | 367 | * boot CPU, but only if we have more than one CPU. |
368 | */ | 368 | */ |
369 | percpu_timer_setup(); | 369 | percpu_timer_setup(); |
370 | 370 | ||
371 | /* | 371 | /* |
372 | * Initialise the SCU if there are more than one CPU | 372 | * Initialise the SCU if there are more than one CPU |
373 | * and let them know where to start. | 373 | * and let them know where to start. |
374 | */ | 374 | */ |
375 | platform_smp_prepare_cpus(max_cpus); | 375 | platform_smp_prepare_cpus(max_cpus); |
376 | } | 376 | } |
377 | } | 377 | } |
378 | 378 | ||
379 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | ||
380 | |||
381 | void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) | ||
382 | { | ||
383 | smp_cross_call = fn; | ||
384 | } | ||
385 | |||
379 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 386 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
380 | { | 387 | { |
381 | smp_cross_call(mask, IPI_CALL_FUNC); | 388 | smp_cross_call(mask, IPI_CALL_FUNC); |
382 | } | 389 | } |
383 | 390 | ||
384 | void arch_send_call_function_single_ipi(int cpu) | 391 | void arch_send_call_function_single_ipi(int cpu) |
385 | { | 392 | { |
386 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | 393 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); |
387 | } | 394 | } |
388 | 395 | ||
389 | static const char *ipi_types[NR_IPI] = { | 396 | static const char *ipi_types[NR_IPI] = { |
390 | #define S(x,s) [x - IPI_TIMER] = s | 397 | #define S(x,s) [x - IPI_TIMER] = s |
391 | S(IPI_TIMER, "Timer broadcast interrupts"), | 398 | S(IPI_TIMER, "Timer broadcast interrupts"), |
392 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), | 399 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), |
393 | S(IPI_CALL_FUNC, "Function call interrupts"), | 400 | S(IPI_CALL_FUNC, "Function call interrupts"), |
394 | S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), | 401 | S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), |
395 | S(IPI_CPU_STOP, "CPU stop interrupts"), | 402 | S(IPI_CPU_STOP, "CPU stop interrupts"), |
396 | }; | 403 | }; |
397 | 404 | ||
398 | void show_ipi_list(struct seq_file *p, int prec) | 405 | void show_ipi_list(struct seq_file *p, int prec) |
399 | { | 406 | { |
400 | unsigned int cpu, i; | 407 | unsigned int cpu, i; |
401 | 408 | ||
402 | for (i = 0; i < NR_IPI; i++) { | 409 | for (i = 0; i < NR_IPI; i++) { |
403 | seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); | 410 | seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); |
404 | 411 | ||
405 | for_each_present_cpu(cpu) | 412 | for_each_present_cpu(cpu) |
406 | seq_printf(p, "%10u ", | 413 | seq_printf(p, "%10u ", |
407 | __get_irq_stat(cpu, ipi_irqs[i])); | 414 | __get_irq_stat(cpu, ipi_irqs[i])); |
408 | 415 | ||
409 | seq_printf(p, " %s\n", ipi_types[i]); | 416 | seq_printf(p, " %s\n", ipi_types[i]); |
410 | } | 417 | } |
411 | } | 418 | } |
412 | 419 | ||
413 | u64 smp_irq_stat_cpu(unsigned int cpu) | 420 | u64 smp_irq_stat_cpu(unsigned int cpu) |
414 | { | 421 | { |
415 | u64 sum = 0; | 422 | u64 sum = 0; |
416 | int i; | 423 | int i; |
417 | 424 | ||
418 | for (i = 0; i < NR_IPI; i++) | 425 | for (i = 0; i < NR_IPI; i++) |
419 | sum += __get_irq_stat(cpu, ipi_irqs[i]); | 426 | sum += __get_irq_stat(cpu, ipi_irqs[i]); |
420 | 427 | ||
421 | #ifdef CONFIG_LOCAL_TIMERS | 428 | #ifdef CONFIG_LOCAL_TIMERS |
422 | sum += __get_irq_stat(cpu, local_timer_irqs); | 429 | sum += __get_irq_stat(cpu, local_timer_irqs); |
423 | #endif | 430 | #endif |
424 | 431 | ||
425 | return sum; | 432 | return sum; |
426 | } | 433 | } |
427 | 434 | ||
428 | /* | 435 | /* |
429 | * Timer (local or broadcast) support | 436 | * Timer (local or broadcast) support |
430 | */ | 437 | */ |
431 | static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); | 438 | static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); |
432 | 439 | ||
433 | static void ipi_timer(void) | 440 | static void ipi_timer(void) |
434 | { | 441 | { |
435 | struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent); | 442 | struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent); |
436 | irq_enter(); | 443 | irq_enter(); |
437 | evt->event_handler(evt); | 444 | evt->event_handler(evt); |
438 | irq_exit(); | 445 | irq_exit(); |
439 | } | 446 | } |
440 | 447 | ||
441 | #ifdef CONFIG_LOCAL_TIMERS | 448 | #ifdef CONFIG_LOCAL_TIMERS |
442 | asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs) | 449 | asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs) |
443 | { | 450 | { |
444 | struct pt_regs *old_regs = set_irq_regs(regs); | 451 | struct pt_regs *old_regs = set_irq_regs(regs); |
445 | int cpu = smp_processor_id(); | 452 | int cpu = smp_processor_id(); |
446 | 453 | ||
447 | if (local_timer_ack()) { | 454 | if (local_timer_ack()) { |
448 | __inc_irq_stat(cpu, local_timer_irqs); | 455 | __inc_irq_stat(cpu, local_timer_irqs); |
449 | ipi_timer(); | 456 | ipi_timer(); |
450 | } | 457 | } |
451 | 458 | ||
452 | set_irq_regs(old_regs); | 459 | set_irq_regs(old_regs); |
453 | } | 460 | } |
454 | 461 | ||
455 | void show_local_irqs(struct seq_file *p, int prec) | 462 | void show_local_irqs(struct seq_file *p, int prec) |
456 | { | 463 | { |
457 | unsigned int cpu; | 464 | unsigned int cpu; |
458 | 465 | ||
459 | seq_printf(p, "%*s: ", prec, "LOC"); | 466 | seq_printf(p, "%*s: ", prec, "LOC"); |
460 | 467 | ||
461 | for_each_present_cpu(cpu) | 468 | for_each_present_cpu(cpu) |
462 | seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs)); | 469 | seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs)); |
463 | 470 | ||
464 | seq_printf(p, " Local timer interrupts\n"); | 471 | seq_printf(p, " Local timer interrupts\n"); |
465 | } | 472 | } |
466 | #endif | 473 | #endif |
467 | 474 | ||
468 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 475 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
469 | static void smp_timer_broadcast(const struct cpumask *mask) | 476 | static void smp_timer_broadcast(const struct cpumask *mask) |
470 | { | 477 | { |
471 | smp_cross_call(mask, IPI_TIMER); | 478 | smp_cross_call(mask, IPI_TIMER); |
472 | } | 479 | } |
473 | #else | 480 | #else |
474 | #define smp_timer_broadcast NULL | 481 | #define smp_timer_broadcast NULL |
475 | #endif | 482 | #endif |
476 | 483 | ||
477 | static void broadcast_timer_set_mode(enum clock_event_mode mode, | 484 | static void broadcast_timer_set_mode(enum clock_event_mode mode, |
478 | struct clock_event_device *evt) | 485 | struct clock_event_device *evt) |
479 | { | 486 | { |
480 | } | 487 | } |
481 | 488 | ||
482 | static void broadcast_timer_setup(struct clock_event_device *evt) | 489 | static void broadcast_timer_setup(struct clock_event_device *evt) |
483 | { | 490 | { |
484 | evt->name = "dummy_timer"; | 491 | evt->name = "dummy_timer"; |
485 | evt->features = CLOCK_EVT_FEAT_ONESHOT | | 492 | evt->features = CLOCK_EVT_FEAT_ONESHOT | |
486 | CLOCK_EVT_FEAT_PERIODIC | | 493 | CLOCK_EVT_FEAT_PERIODIC | |
487 | CLOCK_EVT_FEAT_DUMMY; | 494 | CLOCK_EVT_FEAT_DUMMY; |
488 | evt->rating = 400; | 495 | evt->rating = 400; |
489 | evt->mult = 1; | 496 | evt->mult = 1; |
490 | evt->set_mode = broadcast_timer_set_mode; | 497 | evt->set_mode = broadcast_timer_set_mode; |
491 | 498 | ||
492 | clockevents_register_device(evt); | 499 | clockevents_register_device(evt); |
493 | } | 500 | } |
494 | 501 | ||
495 | void __cpuinit percpu_timer_setup(void) | 502 | void __cpuinit percpu_timer_setup(void) |
496 | { | 503 | { |
497 | unsigned int cpu = smp_processor_id(); | 504 | unsigned int cpu = smp_processor_id(); |
498 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); | 505 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); |
499 | 506 | ||
500 | evt->cpumask = cpumask_of(cpu); | 507 | evt->cpumask = cpumask_of(cpu); |
501 | evt->broadcast = smp_timer_broadcast; | 508 | evt->broadcast = smp_timer_broadcast; |
502 | 509 | ||
503 | if (local_timer_setup(evt)) | 510 | if (local_timer_setup(evt)) |
504 | broadcast_timer_setup(evt); | 511 | broadcast_timer_setup(evt); |
505 | } | 512 | } |
506 | 513 | ||
507 | #ifdef CONFIG_HOTPLUG_CPU | 514 | #ifdef CONFIG_HOTPLUG_CPU |
508 | /* | 515 | /* |
509 | * The generic clock events code purposely does not stop the local timer | 516 | * The generic clock events code purposely does not stop the local timer |
510 | * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it | 517 | * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it |
511 | * manually here. | 518 | * manually here. |
512 | */ | 519 | */ |
513 | static void percpu_timer_stop(void) | 520 | static void percpu_timer_stop(void) |
514 | { | 521 | { |
515 | unsigned int cpu = smp_processor_id(); | 522 | unsigned int cpu = smp_processor_id(); |
516 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); | 523 | struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); |
517 | 524 | ||
518 | evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); | 525 | evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); |
519 | } | 526 | } |
520 | #endif | 527 | #endif |
521 | 528 | ||
522 | static DEFINE_SPINLOCK(stop_lock); | 529 | static DEFINE_SPINLOCK(stop_lock); |
523 | 530 | ||
524 | /* | 531 | /* |
525 | * ipi_cpu_stop - handle IPI from smp_send_stop() | 532 | * ipi_cpu_stop - handle IPI from smp_send_stop() |
526 | */ | 533 | */ |
527 | static void ipi_cpu_stop(unsigned int cpu) | 534 | static void ipi_cpu_stop(unsigned int cpu) |
528 | { | 535 | { |
529 | if (system_state == SYSTEM_BOOTING || | 536 | if (system_state == SYSTEM_BOOTING || |
530 | system_state == SYSTEM_RUNNING) { | 537 | system_state == SYSTEM_RUNNING) { |
531 | spin_lock(&stop_lock); | 538 | spin_lock(&stop_lock); |
532 | printk(KERN_CRIT "CPU%u: stopping\n", cpu); | 539 | printk(KERN_CRIT "CPU%u: stopping\n", cpu); |
533 | dump_stack(); | 540 | dump_stack(); |
534 | spin_unlock(&stop_lock); | 541 | spin_unlock(&stop_lock); |
535 | } | 542 | } |
536 | 543 | ||
537 | set_cpu_online(cpu, false); | 544 | set_cpu_online(cpu, false); |
538 | 545 | ||
539 | local_fiq_disable(); | 546 | local_fiq_disable(); |
540 | local_irq_disable(); | 547 | local_irq_disable(); |
541 | 548 | ||
542 | while (1) | 549 | while (1) |
543 | cpu_relax(); | 550 | cpu_relax(); |
544 | } | 551 | } |
545 | 552 | ||
546 | /* | 553 | /* |
547 | * Main handler for inter-processor interrupts | 554 | * Main handler for inter-processor interrupts |
548 | */ | 555 | */ |
549 | asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) | 556 | asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) |
550 | { | 557 | { |
551 | unsigned int cpu = smp_processor_id(); | 558 | unsigned int cpu = smp_processor_id(); |
552 | struct pt_regs *old_regs = set_irq_regs(regs); | 559 | struct pt_regs *old_regs = set_irq_regs(regs); |
553 | 560 | ||
554 | if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI) | 561 | if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI) |
555 | __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]); | 562 | __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]); |
556 | 563 | ||
557 | switch (ipinr) { | 564 | switch (ipinr) { |
558 | case IPI_TIMER: | 565 | case IPI_TIMER: |
559 | ipi_timer(); | 566 | ipi_timer(); |
560 | break; | 567 | break; |
561 | 568 | ||
562 | case IPI_RESCHEDULE: | 569 | case IPI_RESCHEDULE: |
563 | /* | 570 | /* |
564 | * nothing more to do - eveything is | 571 | * nothing more to do - eveything is |
565 | * done on the interrupt return path | 572 | * done on the interrupt return path |
566 | */ | 573 | */ |
567 | break; | 574 | break; |
568 | 575 | ||
569 | case IPI_CALL_FUNC: | 576 | case IPI_CALL_FUNC: |
570 | generic_smp_call_function_interrupt(); | 577 | generic_smp_call_function_interrupt(); |
571 | break; | 578 | break; |
572 | 579 | ||
573 | case IPI_CALL_FUNC_SINGLE: | 580 | case IPI_CALL_FUNC_SINGLE: |
574 | generic_smp_call_function_single_interrupt(); | 581 | generic_smp_call_function_single_interrupt(); |
575 | break; | 582 | break; |
576 | 583 | ||
577 | case IPI_CPU_STOP: | 584 | case IPI_CPU_STOP: |
578 | ipi_cpu_stop(cpu); | 585 | ipi_cpu_stop(cpu); |
579 | break; | 586 | break; |
580 | 587 | ||
581 | default: | 588 | default: |
582 | printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", | 589 | printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", |
583 | cpu, ipinr); | 590 | cpu, ipinr); |
584 | break; | 591 | break; |
585 | } | 592 | } |
586 | set_irq_regs(old_regs); | 593 | set_irq_regs(old_regs); |
587 | } | 594 | } |
588 | 595 | ||
589 | void smp_send_reschedule(int cpu) | 596 | void smp_send_reschedule(int cpu) |
590 | { | 597 | { |
591 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); | 598 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); |
592 | } | 599 | } |
593 | 600 | ||
594 | void smp_send_stop(void) | 601 | void smp_send_stop(void) |
595 | { | 602 | { |
596 | unsigned long timeout; | 603 | unsigned long timeout; |
597 | 604 | ||
598 | if (num_online_cpus() > 1) { | 605 | if (num_online_cpus() > 1) { |
599 | cpumask_t mask = cpu_online_map; | 606 | cpumask_t mask = cpu_online_map; |
600 | cpu_clear(smp_processor_id(), mask); | 607 | cpu_clear(smp_processor_id(), mask); |
601 | 608 | ||
602 | smp_cross_call(&mask, IPI_CPU_STOP); | 609 | smp_cross_call(&mask, IPI_CPU_STOP); |
603 | } | 610 | } |
604 | 611 | ||
605 | /* Wait up to one second for other CPUs to stop */ | 612 | /* Wait up to one second for other CPUs to stop */ |
606 | timeout = USEC_PER_SEC; | 613 | timeout = USEC_PER_SEC; |
607 | while (num_online_cpus() > 1 && timeout--) | 614 | while (num_online_cpus() > 1 && timeout--) |
608 | udelay(1); | 615 | udelay(1); |
609 | 616 | ||
610 | if (num_online_cpus() > 1) | 617 | if (num_online_cpus() > 1) |
611 | pr_warning("SMP: failed to stop secondary CPUs\n"); | 618 | pr_warning("SMP: failed to stop secondary CPUs\n"); |
612 | } | 619 | } |
613 | 620 | ||
614 | /* | 621 | /* |
615 | * not supported here | 622 | * not supported here |
616 | */ | 623 | */ |
617 | int setup_profiling_timer(unsigned int multiplier) | 624 | int setup_profiling_timer(unsigned int multiplier) |
618 | { | 625 | { |
619 | return -EINVAL; | 626 | return -EINVAL; |
620 | } | 627 | } |
621 | 628 |
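On the platform side, each mach-*/platsmp.c now registers its cross-call function while setting up the possible-CPU map, and the per-platform mach/smp.h headers are deleted. A hedged sketch of the pattern the following hunks apply to the GIC-based platforms (the SCU helper name follows the exynos4 code below, and the NR_CPUS clamp from the real code is omitted; platforms with a different IPI mechanism register their own function instead):

    #include <asm/hardware/gic.h>
    #include <asm/smp_scu.h>

    void __init smp_init_cpus(void)
    {
            unsigned int i, ncores;

            /* scu_base_addr() stands in for the platform's SCU-mapping helper */
            ncores = scu_get_core_count(scu_base_addr());

            for (i = 0; i < ncores; i++)
                    set_cpu_possible(i, true);

            /* hand the GIC softirq raiser to the core SMP code */
            set_smp_cross_call(gic_raise_softirq);
    }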
arch/arm/mach-exynos4/include/mach/smp.h
1 | /* linux/arch/arm/mach-exynos4/include/mach/smp.h | File was deleted | |
2 | * | ||
3 | * Cloned from arch/arm/mach-realview/include/mach/smp.h | ||
4 | */ | ||
5 | |||
6 | #ifndef ASM_ARCH_SMP_H | ||
7 | #define ASM_ARCH_SMP_H __FILE__ | ||
8 | |||
9 | #include <asm/hardware/gic.h> | ||
10 | |||
11 | /* | ||
12 | * We use IRQ1 as the IPI | ||
13 | */ | ||
14 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
15 | { | ||
16 | gic_raise_softirq(mask, ipi); | ||
17 | } | ||
18 | |||
19 | #endif | ||
20 |
arch/arm/mach-exynos4/platsmp.c
1 | /* linux/arch/arm/mach-exynos4/platsmp.c | 1 | /* linux/arch/arm/mach-exynos4/platsmp.c |
2 | * | 2 | * |
3 | * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com | 4 | * http://www.samsung.com |
5 | * | 5 | * |
6 | * Cloned from linux/arch/arm/mach-vexpress/platsmp.c | 6 | * Cloned from linux/arch/arm/mach-vexpress/platsmp.c |
7 | * | 7 | * |
8 | * Copyright (C) 2002 ARM Ltd. | 8 | * Copyright (C) 2002 ARM Ltd. |
9 | * All Rights Reserved | 9 | * All Rights Reserved |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
13 | * published by the Free Software Foundation. | 13 | * published by the Free Software Foundation. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <linux/jiffies.h> | 20 | #include <linux/jiffies.h> |
21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | 23 | ||
24 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
25 | #include <asm/hardware/gic.h> | ||
25 | #include <asm/smp_scu.h> | 26 | #include <asm/smp_scu.h> |
26 | #include <asm/unified.h> | 27 | #include <asm/unified.h> |
27 | 28 | ||
28 | #include <mach/hardware.h> | 29 | #include <mach/hardware.h> |
29 | #include <mach/regs-clock.h> | 30 | #include <mach/regs-clock.h> |
30 | 31 | ||
31 | extern void exynos4_secondary_startup(void); | 32 | extern void exynos4_secondary_startup(void); |
32 | 33 | ||
33 | /* | 34 | /* |
34 | * control for which core is the next to come out of the secondary | 35 | * control for which core is the next to come out of the secondary |
35 | * boot "holding pen" | 36 | * boot "holding pen" |
36 | */ | 37 | */ |
37 | 38 | ||
38 | volatile int __cpuinitdata pen_release = -1; | 39 | volatile int __cpuinitdata pen_release = -1; |
39 | 40 | ||
40 | /* | 41 | /* |
41 | * Write pen_release in a way that is guaranteed to be visible to all | 42 | * Write pen_release in a way that is guaranteed to be visible to all |
42 | * observers, irrespective of whether they're taking part in coherency | 43 | * observers, irrespective of whether they're taking part in coherency |
43 | * or not. This is necessary for the hotplug code to work reliably. | 44 | * or not. This is necessary for the hotplug code to work reliably. |
44 | */ | 45 | */ |
45 | static void write_pen_release(int val) | 46 | static void write_pen_release(int val) |
46 | { | 47 | { |
47 | pen_release = val; | 48 | pen_release = val; |
48 | smp_wmb(); | 49 | smp_wmb(); |
49 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); | 50 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); |
50 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); | 51 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); |
51 | } | 52 | } |
52 | 53 | ||
53 | static void __iomem *scu_base_addr(void) | 54 | static void __iomem *scu_base_addr(void) |
54 | { | 55 | { |
55 | return (void __iomem *)(S5P_VA_SCU); | 56 | return (void __iomem *)(S5P_VA_SCU); |
56 | } | 57 | } |
57 | 58 | ||
58 | static DEFINE_SPINLOCK(boot_lock); | 59 | static DEFINE_SPINLOCK(boot_lock); |
59 | 60 | ||
60 | void __cpuinit platform_secondary_init(unsigned int cpu) | 61 | void __cpuinit platform_secondary_init(unsigned int cpu) |
61 | { | 62 | { |
62 | /* | 63 | /* |
63 | * if any interrupts are already enabled for the primary | 64 | * if any interrupts are already enabled for the primary |
64 | * core (e.g. timer irq), then they will not have been enabled | 65 | * core (e.g. timer irq), then they will not have been enabled |
65 | * for us: do so | 66 | * for us: do so |
66 | */ | 67 | */ |
67 | gic_secondary_init(0); | 68 | gic_secondary_init(0); |
68 | 69 | ||
69 | /* | 70 | /* |
70 | * let the primary processor know we're out of the | 71 | * let the primary processor know we're out of the |
71 | * pen, then head off into the C entry point | 72 | * pen, then head off into the C entry point |
72 | */ | 73 | */ |
73 | write_pen_release(-1); | 74 | write_pen_release(-1); |
74 | 75 | ||
75 | /* | 76 | /* |
76 | * Synchronise with the boot thread. | 77 | * Synchronise with the boot thread. |
77 | */ | 78 | */ |
78 | spin_lock(&boot_lock); | 79 | spin_lock(&boot_lock); |
79 | spin_unlock(&boot_lock); | 80 | spin_unlock(&boot_lock); |
80 | } | 81 | } |
81 | 82 | ||
82 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 83 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
83 | { | 84 | { |
84 | unsigned long timeout; | 85 | unsigned long timeout; |
85 | 86 | ||
86 | /* | 87 | /* |
87 | * Set synchronisation state between this boot processor | 88 | * Set synchronisation state between this boot processor |
88 | * and the secondary one | 89 | * and the secondary one |
89 | */ | 90 | */ |
90 | spin_lock(&boot_lock); | 91 | spin_lock(&boot_lock); |
91 | 92 | ||
92 | /* | 93 | /* |
93 | * The secondary processor is waiting to be released from | 94 | * The secondary processor is waiting to be released from |
94 | * the holding pen - release it, then wait for it to flag | 95 | * the holding pen - release it, then wait for it to flag |
95 | * that it has been released by resetting pen_release. | 96 | * that it has been released by resetting pen_release. |
96 | * | 97 | * |
97 | * Note that "pen_release" is the hardware CPU ID, whereas | 98 | * Note that "pen_release" is the hardware CPU ID, whereas |
98 | * "cpu" is Linux's internal ID. | 99 | * "cpu" is Linux's internal ID. |
99 | */ | 100 | */ |
100 | write_pen_release(cpu); | 101 | write_pen_release(cpu); |
101 | 102 | ||
102 | /* | 103 | /* |
103 | * Send the secondary CPU a soft interrupt, thereby causing | 104 | * Send the secondary CPU a soft interrupt, thereby causing |
104 | * the boot monitor to read the system wide flags register, | 105 | * the boot monitor to read the system wide flags register, |
105 | * and branch to the address found there. | 106 | * and branch to the address found there. |
106 | */ | 107 | */ |
107 | smp_cross_call(cpumask_of(cpu), 1); | 108 | gic_raise_softirq(cpumask_of(cpu), 1); |
108 | 109 | ||
109 | timeout = jiffies + (1 * HZ); | 110 | timeout = jiffies + (1 * HZ); |
110 | while (time_before(jiffies, timeout)) { | 111 | while (time_before(jiffies, timeout)) { |
111 | smp_rmb(); | 112 | smp_rmb(); |
112 | if (pen_release == -1) | 113 | if (pen_release == -1) |
113 | break; | 114 | break; |
114 | 115 | ||
115 | udelay(10); | 116 | udelay(10); |
116 | } | 117 | } |
117 | 118 | ||
118 | /* | 119 | /* |
119 | * now the secondary core is starting up let it run its | 120 | * now the secondary core is starting up let it run its |
120 | * calibrations, then wait for it to finish | 121 | * calibrations, then wait for it to finish |
121 | */ | 122 | */ |
122 | spin_unlock(&boot_lock); | 123 | spin_unlock(&boot_lock); |
123 | 124 | ||
124 | return pen_release != -1 ? -ENOSYS : 0; | 125 | return pen_release != -1 ? -ENOSYS : 0; |
125 | } | 126 | } |
126 | 127 | ||
127 | /* | 128 | /* |
128 | * Initialise the CPU possible map early - this describes the CPUs | 129 | * Initialise the CPU possible map early - this describes the CPUs |
129 | * which may be present or become present in the system. | 130 | * which may be present or become present in the system. |
130 | */ | 131 | */ |
131 | 132 | ||
132 | void __init smp_init_cpus(void) | 133 | void __init smp_init_cpus(void) |
133 | { | 134 | { |
134 | void __iomem *scu_base = scu_base_addr(); | 135 | void __iomem *scu_base = scu_base_addr(); |
135 | unsigned int i, ncores; | 136 | unsigned int i, ncores; |
136 | 137 | ||
137 | ncores = scu_base ? scu_get_core_count(scu_base) : 1; | 138 | ncores = scu_base ? scu_get_core_count(scu_base) : 1; |
138 | 139 | ||
139 | /* sanity check */ | 140 | /* sanity check */ |
140 | if (ncores > NR_CPUS) { | 141 | if (ncores > NR_CPUS) { |
141 | printk(KERN_WARNING | 142 | printk(KERN_WARNING |
142 | "EXYNOS4: no. of cores (%d) greater than configured " | 143 | "EXYNOS4: no. of cores (%d) greater than configured " |
143 | "maximum of %d - clipping\n", | 144 | "maximum of %d - clipping\n", |
144 | ncores, NR_CPUS); | 145 | ncores, NR_CPUS); |
145 | ncores = NR_CPUS; | 146 | ncores = NR_CPUS; |
146 | } | 147 | } |
147 | 148 | ||
148 | for (i = 0; i < ncores; i++) | 149 | for (i = 0; i < ncores; i++) |
149 | set_cpu_possible(i, true); | 150 | set_cpu_possible(i, true); |
151 | |||
152 | set_smp_cross_call(gic_raise_softirq); | ||
150 | } | 153 | } |
151 | 154 | ||
152 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 155 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
153 | { | 156 | { |
154 | int i; | 157 | int i; |
155 | 158 | ||
156 | /* | 159 | /* |
157 | * Initialise the present map, which describes the set of CPUs | 160 | * Initialise the present map, which describes the set of CPUs |
158 | * actually populated at the present time. | 161 | * actually populated at the present time. |
159 | */ | 162 | */ |
160 | for (i = 0; i < max_cpus; i++) | 163 | for (i = 0; i < max_cpus; i++) |
161 | set_cpu_present(i, true); | 164 | set_cpu_present(i, true); |
162 | 165 | ||
163 | scu_enable(scu_base_addr()); | 166 | scu_enable(scu_base_addr()); |
164 | 167 | ||
165 | /* | 168 | /* |
166 | * Write the address of secondary startup into the | 169 | * Write the address of secondary startup into the |
167 | * system-wide flags register. The boot monitor waits | 170 | * system-wide flags register. The boot monitor waits |
168 | * until it receives a soft interrupt, and then the | 171 | * until it receives a soft interrupt, and then the |
169 | * secondary CPU branches to this address. | 172 | * secondary CPU branches to this address. |
170 | */ | 173 | */ |
171 | __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)), S5P_VA_SYSRAM); | 174 | __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)), S5P_VA_SYSRAM); |
172 | } | 175 | } |
173 | 176 |
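One side effect is visible in the hunk above: because smp_cross_call() is no longer exported to platforms, boot_secondary() cannot use it to wake the secondary out of the holding pen, so the wake-up now calls the GIC helper directly:

    -	smp_cross_call(cpumask_of(cpu), 1);
    +	gic_raise_softirq(cpumask_of(cpu), 1);

The other GIC-based platsmp.c files touched by this commit make the same substitution wherever they poke the pen, with <asm/hardware/gic.h> now included explicitly where needed.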
arch/arm/mach-msm/include/mach/smp.h
1 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved. | File was deleted | |
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #ifndef __ASM_ARCH_MSM_SMP_H | ||
14 | #define __ASM_ARCH_MSM_SMP_H | ||
15 | |||
16 | #include <asm/hardware/gic.h> | ||
17 | |||
18 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
19 | { | ||
20 | gic_raise_softirq(mask, ipi); | ||
21 | } | ||
22 | |||
23 | #endif | ||
24 |
arch/arm/mach-msm/platsmp.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 ARM Ltd. | 2 | * Copyright (C) 2002 ARM Ltd. |
3 | * All Rights Reserved | 3 | * All Rights Reserved |
4 | * Copyright (c) 2010, Code Aurora Forum. All rights reserved. | 4 | * Copyright (c) 2010, Code Aurora Forum. All rights reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/jiffies.h> | 15 | #include <linux/jiffies.h> |
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | 18 | ||
19 | #include <asm/hardware/gic.h> | 19 | #include <asm/hardware/gic.h> |
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <asm/mach-types.h> | 21 | #include <asm/mach-types.h> |
22 | 22 | ||
23 | #include <mach/msm_iomap.h> | 23 | #include <mach/msm_iomap.h> |
24 | 24 | ||
25 | #include "scm-boot.h" | 25 | #include "scm-boot.h" |
26 | 26 | ||
27 | #define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0 | 27 | #define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0 |
28 | #define SCSS_CPU1CORE_RESET 0xD80 | 28 | #define SCSS_CPU1CORE_RESET 0xD80 |
29 | #define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64 | 29 | #define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64 |
30 | 30 | ||
31 | /* Mask for edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */ | 31 | /* Mask for edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */ |
32 | #define GIC_PPI_EDGE_MASK 0xFFFFD7FF | 32 | #define GIC_PPI_EDGE_MASK 0xFFFFD7FF |
33 | 33 | ||
34 | extern void msm_secondary_startup(void); | 34 | extern void msm_secondary_startup(void); |
35 | /* | 35 | /* |
36 | * control for which core is the next to come out of the secondary | 36 | * control for which core is the next to come out of the secondary |
37 | * boot "holding pen". | 37 | * boot "holding pen". |
38 | */ | 38 | */ |
39 | volatile int pen_release = -1; | 39 | volatile int pen_release = -1; |
40 | 40 | ||
41 | static DEFINE_SPINLOCK(boot_lock); | 41 | static DEFINE_SPINLOCK(boot_lock); |
42 | 42 | ||
43 | void __cpuinit platform_secondary_init(unsigned int cpu) | 43 | void __cpuinit platform_secondary_init(unsigned int cpu) |
44 | { | 44 | { |
45 | /* Configure edge-triggered PPIs */ | 45 | /* Configure edge-triggered PPIs */ |
46 | writel(GIC_PPI_EDGE_MASK, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4); | 46 | writel(GIC_PPI_EDGE_MASK, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4); |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * if any interrupts are already enabled for the primary | 49 | * if any interrupts are already enabled for the primary |
50 | * core (e.g. timer irq), then they will not have been enabled | 50 | * core (e.g. timer irq), then they will not have been enabled |
51 | * for us: do so | 51 | * for us: do so |
52 | */ | 52 | */ |
53 | gic_secondary_init(0); | 53 | gic_secondary_init(0); |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * let the primary processor know we're out of the | 56 | * let the primary processor know we're out of the |
57 | * pen, then head off into the C entry point | 57 | * pen, then head off into the C entry point |
58 | */ | 58 | */ |
59 | pen_release = -1; | 59 | pen_release = -1; |
60 | smp_wmb(); | 60 | smp_wmb(); |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * Synchronise with the boot thread. | 63 | * Synchronise with the boot thread. |
64 | */ | 64 | */ |
65 | spin_lock(&boot_lock); | 65 | spin_lock(&boot_lock); |
66 | spin_unlock(&boot_lock); | 66 | spin_unlock(&boot_lock); |
67 | } | 67 | } |
68 | 68 | ||
69 | static __cpuinit void prepare_cold_cpu(unsigned int cpu) | 69 | static __cpuinit void prepare_cold_cpu(unsigned int cpu) |
70 | { | 70 | { |
71 | int ret; | 71 | int ret; |
72 | ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup), | 72 | ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup), |
73 | SCM_FLAG_COLDBOOT_CPU1); | 73 | SCM_FLAG_COLDBOOT_CPU1); |
74 | if (ret == 0) { | 74 | if (ret == 0) { |
75 | void *sc1_base_ptr; | 75 | void *sc1_base_ptr; |
76 | sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2); | 76 | sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2); |
77 | if (sc1_base_ptr) { | 77 | if (sc1_base_ptr) { |
78 | writel(0, sc1_base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL); | 78 | writel(0, sc1_base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL); |
79 | writel(0, sc1_base_ptr + SCSS_CPU1CORE_RESET); | 79 | writel(0, sc1_base_ptr + SCSS_CPU1CORE_RESET); |
80 | writel(3, sc1_base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP); | 80 | writel(3, sc1_base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP); |
81 | iounmap(sc1_base_ptr); | 81 | iounmap(sc1_base_ptr); |
82 | } | 82 | } |
83 | } else | 83 | } else |
84 | printk(KERN_DEBUG "Failed to set secondary core boot " | 84 | printk(KERN_DEBUG "Failed to set secondary core boot " |
85 | "address\n"); | 85 | "address\n"); |
86 | } | 86 | } |
87 | 87 | ||
88 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 88 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
89 | { | 89 | { |
90 | unsigned long timeout; | 90 | unsigned long timeout; |
91 | static int cold_boot_done; | 91 | static int cold_boot_done; |
92 | 92 | ||
93 | /* Only need to bring cpu out of reset this way once */ | 93 | /* Only need to bring cpu out of reset this way once */ |
94 | if (cold_boot_done == false) { | 94 | if (cold_boot_done == false) { |
95 | prepare_cold_cpu(cpu); | 95 | prepare_cold_cpu(cpu); |
96 | cold_boot_done = true; | 96 | cold_boot_done = true; |
97 | } | 97 | } |
98 | 98 | ||
99 | /* | 99 | /* |
100 | * set synchronisation state between this boot processor | 100 | * set synchronisation state between this boot processor |
101 | * and the secondary one | 101 | * and the secondary one |
102 | */ | 102 | */ |
103 | spin_lock(&boot_lock); | 103 | spin_lock(&boot_lock); |
104 | 104 | ||
105 | /* | 105 | /* |
106 | * The secondary processor is waiting to be released from | 106 | * The secondary processor is waiting to be released from |
107 | * the holding pen - release it, then wait for it to flag | 107 | * the holding pen - release it, then wait for it to flag |
108 | * that it has been released by resetting pen_release. | 108 | * that it has been released by resetting pen_release. |
109 | * | 109 | * |
110 | * Note that "pen_release" is the hardware CPU ID, whereas | 110 | * Note that "pen_release" is the hardware CPU ID, whereas |
111 | * "cpu" is Linux's internal ID. | 111 | * "cpu" is Linux's internal ID. |
112 | */ | 112 | */ |
113 | pen_release = cpu; | 113 | pen_release = cpu; |
114 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); | 114 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); |
115 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); | 115 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * Send the secondary CPU a soft interrupt, thereby causing | 118 | * Send the secondary CPU a soft interrupt, thereby causing |
119 | * the boot monitor to read the system wide flags register, | 119 | * the boot monitor to read the system wide flags register, |
120 | * and branch to the address found there. | 120 | * and branch to the address found there. |
121 | */ | 121 | */ |
122 | smp_cross_call(cpumask_of(cpu), 1); | 122 | gic_raise_softirq(cpumask_of(cpu), 1); |
123 | 123 | ||
124 | timeout = jiffies + (1 * HZ); | 124 | timeout = jiffies + (1 * HZ); |
125 | while (time_before(jiffies, timeout)) { | 125 | while (time_before(jiffies, timeout)) { |
126 | smp_rmb(); | 126 | smp_rmb(); |
127 | if (pen_release == -1) | 127 | if (pen_release == -1) |
128 | break; | 128 | break; |
129 | 129 | ||
130 | udelay(10); | 130 | udelay(10); |
131 | } | 131 | } |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * now the secondary core is starting up let it run its | 134 | * now the secondary core is starting up let it run its |
135 | * calibrations, then wait for it to finish | 135 | * calibrations, then wait for it to finish |
136 | */ | 136 | */ |
137 | spin_unlock(&boot_lock); | 137 | spin_unlock(&boot_lock); |
138 | 138 | ||
139 | return pen_release != -1 ? -ENOSYS : 0; | 139 | return pen_release != -1 ? -ENOSYS : 0; |
140 | } | 140 | } |
141 | 141 | ||
142 | /* | 142 | /* |
143 | * Initialise the CPU possible map early - this describes the CPUs | 143 | * Initialise the CPU possible map early - this describes the CPUs |
144 | * which may be present or become present in the system. The msm8x60 | 144 | * which may be present or become present in the system. The msm8x60 |
145 | * does not support the ARM SCU, so just set the possible cpu mask to | 145 | * does not support the ARM SCU, so just set the possible cpu mask to |
146 | * NR_CPUS. | 146 | * NR_CPUS. |
147 | */ | 147 | */ |
148 | void __init smp_init_cpus(void) | 148 | void __init smp_init_cpus(void) |
149 | { | 149 | { |
150 | unsigned int i; | 150 | unsigned int i; |
151 | 151 | ||
152 | for (i = 0; i < NR_CPUS; i++) | 152 | for (i = 0; i < NR_CPUS; i++) |
153 | set_cpu_possible(i, true); | 153 | set_cpu_possible(i, true); |
154 | |||
155 | set_smp_cross_call(gic_raise_softirq); | ||
154 | } | 156 | } |
155 | 157 | ||
156 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 158 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
157 | { | 159 | { |
158 | int i; | 160 | int i; |
159 | 161 | ||
160 | /* | 162 | /* |
161 | * Initialise the present map, which describes the set of CPUs | 163 | * Initialise the present map, which describes the set of CPUs |
162 | * actually populated at the present time. | 164 | * actually populated at the present time. |
163 | */ | 165 | */ |
164 | for (i = 0; i < max_cpus; i++) | 166 | for (i = 0; i < max_cpus; i++) |
165 | set_cpu_present(i, true); | 167 | set_cpu_present(i, true); |
166 | } | 168 | } |
167 | 169 |
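The msm hunk above shows the two touch points every GIC-based platform picks up in this commit: register the IPI trigger once in smp_init_cpus(), and call gic_raise_softirq() directly where the deleted wrapper used to sit. Condensed into a sketch (platform specifics elided):

	void __init smp_init_cpus(void)
	{
		/* ... populate the possible map as before ... */
		set_smp_cross_call(gic_raise_softirq);	/* new: register once */
	}

	int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	{
		/* ... release the secondary from its holding pen ... */
		gic_raise_softirq(cpumask_of(cpu), 1);	/* was smp_cross_call() */
		/* ... wait for the secondary to signal it is running ... */
		return 0;
	}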
arch/arm/mach-omap2/omap-smp.c
1 | /* | 1 | /* |
2 | * OMAP4 SMP source file. It contains platform-specific functions | 2 | * OMAP4 SMP source file. It contains platform-specific functions |
3 | * needed for the linux smp kernel. | 3 | * needed for the linux smp kernel. |
4 | * | 4 | * |
5 | * Copyright (C) 2009 Texas Instruments, Inc. | 5 | * Copyright (C) 2009 Texas Instruments, Inc. |
6 | * | 6 | * |
7 | * Author: | 7 | * Author: |
8 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | 8 | * Santosh Shilimkar <santosh.shilimkar@ti.com> |
9 | * | 9 | * |
10 | * Platform file needed for the OMAP4 SMP. This file is based on arm | 10 | * Platform file needed for the OMAP4 SMP. This file is based on arm |
11 | * realview smp platform. | 11 | * realview smp platform. |
12 | * Copyright (c) 2002 ARM Limited. | 12 | * Copyright (c) 2002 ARM Limited. |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
15 | * it under the terms of the GNU General Public License version 2 as | 15 | * it under the terms of the GNU General Public License version 2 as |
16 | * published by the Free Software Foundation. | 16 | * published by the Free Software Foundation. |
17 | */ | 17 | */ |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | 22 | ||
23 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
24 | #include <asm/hardware/gic.h> | ||
24 | #include <asm/smp_scu.h> | 25 | #include <asm/smp_scu.h> |
25 | #include <mach/hardware.h> | 26 | #include <mach/hardware.h> |
26 | #include <mach/omap4-common.h> | 27 | #include <mach/omap4-common.h> |
27 | 28 | ||
28 | /* SCU base address */ | 29 | /* SCU base address */ |
29 | static void __iomem *scu_base; | 30 | static void __iomem *scu_base; |
30 | 31 | ||
31 | static DEFINE_SPINLOCK(boot_lock); | 32 | static DEFINE_SPINLOCK(boot_lock); |
32 | 33 | ||
33 | void __cpuinit platform_secondary_init(unsigned int cpu) | 34 | void __cpuinit platform_secondary_init(unsigned int cpu) |
34 | { | 35 | { |
35 | /* | 36 | /* |
36 | * If any interrupts are already enabled for the primary | 37 | * If any interrupts are already enabled for the primary |
37 | * core (e.g. timer irq), then they will not have been enabled | 38 | * core (e.g. timer irq), then they will not have been enabled |
38 | * for us: do so | 39 | * for us: do so |
39 | */ | 40 | */ |
40 | gic_secondary_init(0); | 41 | gic_secondary_init(0); |
41 | 42 | ||
42 | /* | 43 | /* |
43 | * Synchronise with the boot thread. | 44 | * Synchronise with the boot thread. |
44 | */ | 45 | */ |
45 | spin_lock(&boot_lock); | 46 | spin_lock(&boot_lock); |
46 | spin_unlock(&boot_lock); | 47 | spin_unlock(&boot_lock); |
47 | } | 48 | } |
48 | 49 | ||
49 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 50 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
50 | { | 51 | { |
51 | /* | 52 | /* |
52 | * Set synchronisation state between this boot processor | 53 | * Set synchronisation state between this boot processor |
53 | * and the secondary one | 54 | * and the secondary one |
54 | */ | 55 | */ |
55 | spin_lock(&boot_lock); | 56 | spin_lock(&boot_lock); |
56 | 57 | ||
57 | /* | 58 | /* |
58 | * Update the AuxCoreBoot0 with boot state for secondary core. | 59 | * Update the AuxCoreBoot0 with boot state for secondary core. |
59 | * omap_secondary_startup() routine will hold the secondary core till | 60 | * omap_secondary_startup() routine will hold the secondary core till |
60 | * the AuxCoreBoot1 register is updated with cpu state | 61 | * the AuxCoreBoot1 register is updated with cpu state |
61 | * A barrier is added to ensure that write buffer is drained | 62 | * A barrier is added to ensure that write buffer is drained |
62 | */ | 63 | */ |
63 | omap_modify_auxcoreboot0(0x200, 0xfffffdff); | 64 | omap_modify_auxcoreboot0(0x200, 0xfffffdff); |
64 | flush_cache_all(); | 65 | flush_cache_all(); |
65 | smp_wmb(); | 66 | smp_wmb(); |
66 | smp_cross_call(cpumask_of(cpu), 1); | 67 | gic_raise_softirq(cpumask_of(cpu), 1); |
67 | 68 | ||
68 | /* | 69 | /* |
69 | * Now the secondary core is starting up let it run its | 70 | * Now the secondary core is starting up let it run its |
70 | * calibrations, then wait for it to finish | 71 | * calibrations, then wait for it to finish |
71 | */ | 72 | */ |
72 | spin_unlock(&boot_lock); | 73 | spin_unlock(&boot_lock); |
73 | 74 | ||
74 | return 0; | 75 | return 0; |
75 | } | 76 | } |
76 | 77 | ||
77 | static void __init wakeup_secondary(void) | 78 | static void __init wakeup_secondary(void) |
78 | { | 79 | { |
79 | /* | 80 | /* |
80 | * Write the address of secondary startup routine into the | 81 | * Write the address of secondary startup routine into the |
81 | * AuxCoreBoot1 where ROM code will jump and start executing | 82 | * AuxCoreBoot1 where ROM code will jump and start executing |
82 | * on secondary core once out of WFE | 83 | * on secondary core once out of WFE |
83 | * A barrier is added to ensure that write buffer is drained | 84 | * A barrier is added to ensure that write buffer is drained |
84 | */ | 85 | */ |
85 | omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup)); | 86 | omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup)); |
86 | smp_wmb(); | 87 | smp_wmb(); |
87 | 88 | ||
88 | /* | 89 | /* |
89 | * Send a 'sev' to wake the secondary core from WFE. | 90 | * Send a 'sev' to wake the secondary core from WFE. |
90 | * Drain the outstanding writes to memory | 91 | * Drain the outstanding writes to memory |
91 | */ | 92 | */ |
92 | dsb_sev(); | 93 | dsb_sev(); |
93 | mb(); | 94 | mb(); |
94 | } | 95 | } |
95 | 96 | ||
96 | /* | 97 | /* |
97 | * Initialise the CPU possible map early - this describes the CPUs | 98 | * Initialise the CPU possible map early - this describes the CPUs |
98 | * which may be present or become present in the system. | 99 | * which may be present or become present in the system. |
99 | */ | 100 | */ |
100 | void __init smp_init_cpus(void) | 101 | void __init smp_init_cpus(void) |
101 | { | 102 | { |
102 | unsigned int i, ncores; | 103 | unsigned int i, ncores; |
103 | 104 | ||
104 | /* Never released */ | 105 | /* Never released */ |
105 | scu_base = ioremap(OMAP44XX_SCU_BASE, SZ_256); | 106 | scu_base = ioremap(OMAP44XX_SCU_BASE, SZ_256); |
106 | BUG_ON(!scu_base); | 107 | BUG_ON(!scu_base); |
107 | 108 | ||
108 | ncores = scu_get_core_count(scu_base); | 109 | ncores = scu_get_core_count(scu_base); |
109 | 110 | ||
110 | /* sanity check */ | 111 | /* sanity check */ |
111 | if (ncores > NR_CPUS) { | 112 | if (ncores > NR_CPUS) { |
112 | printk(KERN_WARNING | 113 | printk(KERN_WARNING |
113 | "OMAP4: no. of cores (%d) greater than configured " | 114 | "OMAP4: no. of cores (%d) greater than configured " |
114 | "maximum of %d - clipping\n", | 115 | "maximum of %d - clipping\n", |
115 | ncores, NR_CPUS); | 116 | ncores, NR_CPUS); |
116 | ncores = NR_CPUS; | 117 | ncores = NR_CPUS; |
117 | } | 118 | } |
118 | 119 | ||
119 | for (i = 0; i < ncores; i++) | 120 | for (i = 0; i < ncores; i++) |
120 | set_cpu_possible(i, true); | 121 | set_cpu_possible(i, true); |
122 | |||
123 | set_smp_cross_call(gic_raise_softirq); | ||
121 | } | 124 | } |
122 | 125 | ||
123 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 126 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
124 | { | 127 | { |
125 | int i; | 128 | int i; |
126 | 129 | ||
127 | /* | 130 | /* |
128 | * Initialise the present map, which describes the set of CPUs | 131 | * Initialise the present map, which describes the set of CPUs |
129 | * actually populated at the present time. | 132 | * actually populated at the present time. |
130 | */ | 133 | */ |
131 | for (i = 0; i < max_cpus; i++) | 134 | for (i = 0; i < max_cpus; i++) |
132 | set_cpu_present(i, true); | 135 | set_cpu_present(i, true); |
133 | 136 | ||
134 | /* | 137 | /* |
135 | * Initialise the SCU and wake up the secondary core using | 138 | * Initialise the SCU and wake up the secondary core using |
136 | * wakeup_secondary(). | 139 | * wakeup_secondary(). |
137 | */ | 140 | */ |
138 | scu_enable(scu_base); | 141 | scu_enable(scu_base); |
139 | wakeup_secondary(); | 142 | wakeup_secondary(); |
140 | } | 143 | } |
141 | 144 |
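The dsb_sev() call in wakeup_secondary() above pairs a data synchronisation barrier with the SEV instruction: the barrier guarantees the just-written AuxCoreBoot1 address is visible before the event wakes the secondary out of WFE. As a rough sketch of such a helper for ARMv7 (an assumed form; the real definition lives in the arch headers and handles more configurations):

	static inline void dsb_sev(void)
	{
		__asm__ __volatile__(
			"dsb\n"		/* drain outstanding writes first */
			"sev"		/* then signal the event to parked cores */
			: : : "memory");
	}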
arch/arm/mach-realview/include/mach/smp.h
1 | #ifndef ASMARM_ARCH_SMP_H | File was deleted | |
2 | #define ASMARM_ARCH_SMP_H | ||
3 | |||
4 | #include <asm/hardware/gic.h> | ||
5 | |||
6 | /* | ||
7 | * We use IRQ1 as the IPI | ||
8 | */ | ||
9 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
10 | { | ||
11 | gic_raise_softirq(mask, ipi); | ||
12 | } | ||
13 | |||
14 | #endif | ||
arch/arm/mach-realview/platsmp.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/mach-realview/platsmp.c | 2 | * linux/arch/arm/mach-realview/platsmp.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002 ARM Ltd. | 4 | * Copyright (C) 2002 ARM Ltd. |
5 | * All Rights Reserved | 5 | * All Rights Reserved |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | 15 | ||
16 | #include <mach/hardware.h> | 16 | #include <mach/hardware.h> |
17 | #include <asm/hardware/gic.h> | ||
17 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
18 | #include <asm/smp_scu.h> | 19 | #include <asm/smp_scu.h> |
19 | #include <asm/unified.h> | 20 | #include <asm/unified.h> |
20 | 21 | ||
21 | #include <mach/board-eb.h> | 22 | #include <mach/board-eb.h> |
22 | #include <mach/board-pb11mp.h> | 23 | #include <mach/board-pb11mp.h> |
23 | #include <mach/board-pbx.h> | 24 | #include <mach/board-pbx.h> |
24 | 25 | ||
25 | #include "core.h" | 26 | #include "core.h" |
26 | 27 | ||
27 | extern void versatile_secondary_startup(void); | 28 | extern void versatile_secondary_startup(void); |
28 | 29 | ||
29 | static void __iomem *scu_base_addr(void) | 30 | static void __iomem *scu_base_addr(void) |
30 | { | 31 | { |
31 | if (machine_is_realview_eb_mp()) | 32 | if (machine_is_realview_eb_mp()) |
32 | return __io_address(REALVIEW_EB11MP_SCU_BASE); | 33 | return __io_address(REALVIEW_EB11MP_SCU_BASE); |
33 | else if (machine_is_realview_pb11mp()) | 34 | else if (machine_is_realview_pb11mp()) |
34 | return __io_address(REALVIEW_TC11MP_SCU_BASE); | 35 | return __io_address(REALVIEW_TC11MP_SCU_BASE); |
35 | else if (machine_is_realview_pbx() && | 36 | else if (machine_is_realview_pbx() && |
36 | (core_tile_pbx11mp() || core_tile_pbxa9mp())) | 37 | (core_tile_pbx11mp() || core_tile_pbxa9mp())) |
37 | return __io_address(REALVIEW_PBX_TILE_SCU_BASE); | 38 | return __io_address(REALVIEW_PBX_TILE_SCU_BASE); |
38 | else | 39 | else |
39 | return (void __iomem *)0; | 40 | return (void __iomem *)0; |
40 | } | 41 | } |
41 | 42 | ||
42 | /* | 43 | /* |
43 | * Initialise the CPU possible map early - this describes the CPUs | 44 | * Initialise the CPU possible map early - this describes the CPUs |
44 | * which may be present or become present in the system. | 45 | * which may be present or become present in the system. |
45 | */ | 46 | */ |
46 | void __init smp_init_cpus(void) | 47 | void __init smp_init_cpus(void) |
47 | { | 48 | { |
48 | void __iomem *scu_base = scu_base_addr(); | 49 | void __iomem *scu_base = scu_base_addr(); |
49 | unsigned int i, ncores; | 50 | unsigned int i, ncores; |
50 | 51 | ||
51 | ncores = scu_base ? scu_get_core_count(scu_base) : 1; | 52 | ncores = scu_base ? scu_get_core_count(scu_base) : 1; |
52 | 53 | ||
53 | /* sanity check */ | 54 | /* sanity check */ |
54 | if (ncores > NR_CPUS) { | 55 | if (ncores > NR_CPUS) { |
55 | printk(KERN_WARNING | 56 | printk(KERN_WARNING |
56 | "Realview: no. of cores (%d) greater than configured " | 57 | "Realview: no. of cores (%d) greater than configured " |
57 | "maximum of %d - clipping\n", | 58 | "maximum of %d - clipping\n", |
58 | ncores, NR_CPUS); | 59 | ncores, NR_CPUS); |
59 | ncores = NR_CPUS; | 60 | ncores = NR_CPUS; |
60 | } | 61 | } |
61 | 62 | ||
62 | for (i = 0; i < ncores; i++) | 63 | for (i = 0; i < ncores; i++) |
63 | set_cpu_possible(i, true); | 64 | set_cpu_possible(i, true); |
65 | |||
66 | set_smp_cross_call(gic_raise_softirq); | ||
64 | } | 67 | } |
65 | 68 | ||
66 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 69 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
67 | { | 70 | { |
68 | int i; | 71 | int i; |
69 | 72 | ||
70 | /* | 73 | /* |
71 | * Initialise the present map, which describes the set of CPUs | 74 | * Initialise the present map, which describes the set of CPUs |
72 | * actually populated at the present time. | 75 | * actually populated at the present time. |
73 | */ | 76 | */ |
74 | for (i = 0; i < max_cpus; i++) | 77 | for (i = 0; i < max_cpus; i++) |
75 | set_cpu_present(i, true); | 78 | set_cpu_present(i, true); |
76 | 79 | ||
77 | scu_enable(scu_base_addr()); | 80 | scu_enable(scu_base_addr()); |
78 | 81 | ||
79 | /* | 82 | /* |
80 | * Write the address of secondary startup into the | 83 | * Write the address of secondary startup into the |
81 | * system-wide flags register. The BootMonitor waits | 84 | * system-wide flags register. The BootMonitor waits |
82 | * until it receives a soft interrupt, and then the | 85 | * until it receives a soft interrupt, and then the |
83 | * secondary CPU branches to this address. | 86 | * secondary CPU branches to this address. |
84 | */ | 87 | */ |
85 | __raw_writel(BSYM(virt_to_phys(versatile_secondary_startup)), | 88 | __raw_writel(BSYM(virt_to_phys(versatile_secondary_startup)), |
86 | __io_address(REALVIEW_SYS_FLAGSSET)); | 89 | __io_address(REALVIEW_SYS_FLAGSSET)); |
87 | } | 90 | } |
88 | 91 |
arch/arm/mach-shmobile/include/mach/smp.h
1 | #ifndef __MACH_SMP_H | File was deleted | |
2 | #define __MACH_SMP_H | ||
3 | |||
4 | #include <asm/hardware/gic.h> | ||
5 | |||
6 | /* | ||
7 | * We use IRQ1 as the IPI | ||
8 | */ | ||
9 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
10 | { | ||
11 | #if defined(CONFIG_ARM_GIC) | ||
12 | gic_raise_softirq(mask, ipi); | ||
13 | #endif | ||
14 | } | ||
15 | |||
16 | #endif | ||
arch/arm/mach-shmobile/platsmp.c
1 | /* | 1 | /* |
2 | * SMP support for R-Mobile / SH-Mobile | 2 | * SMP support for R-Mobile / SH-Mobile |
3 | * | 3 | * |
4 | * Copyright (C) 2010 Magnus Damm | 4 | * Copyright (C) 2010 Magnus Damm |
5 | * Copyright (C) 2011 Paul Mundt | 5 | * Copyright (C) 2011 Paul Mundt |
6 | * | 6 | * |
7 | * Based on vexpress, Copyright (C) 2002 ARM Ltd, All Rights Reserved | 7 | * Based on vexpress, Copyright (C) 2002 ARM Ltd, All Rights Reserved |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <asm/hardware/gic.h> | ||
19 | #include <asm/localtimer.h> | 20 | #include <asm/localtimer.h> |
20 | #include <asm/mach-types.h> | 21 | #include <asm/mach-types.h> |
21 | #include <mach/common.h> | 22 | #include <mach/common.h> |
22 | 23 | ||
23 | static unsigned int __init shmobile_smp_get_core_count(void) | 24 | static unsigned int __init shmobile_smp_get_core_count(void) |
24 | { | 25 | { |
25 | if (machine_is_ag5evm()) | 26 | if (machine_is_ag5evm()) |
26 | return sh73a0_get_core_count(); | 27 | return sh73a0_get_core_count(); |
27 | 28 | ||
28 | return 1; | 29 | return 1; |
29 | } | 30 | } |
30 | 31 | ||
31 | static void __init shmobile_smp_prepare_cpus(void) | 32 | static void __init shmobile_smp_prepare_cpus(void) |
32 | { | 33 | { |
33 | if (machine_is_ag5evm()) | 34 | if (machine_is_ag5evm()) |
34 | sh73a0_smp_prepare_cpus(); | 35 | sh73a0_smp_prepare_cpus(); |
35 | } | 36 | } |
36 | 37 | ||
37 | void __cpuinit platform_secondary_init(unsigned int cpu) | 38 | void __cpuinit platform_secondary_init(unsigned int cpu) |
38 | { | 39 | { |
39 | trace_hardirqs_off(); | 40 | trace_hardirqs_off(); |
40 | 41 | ||
41 | if (machine_is_ag5evm()) | 42 | if (machine_is_ag5evm()) |
42 | sh73a0_secondary_init(cpu); | 43 | sh73a0_secondary_init(cpu); |
43 | } | 44 | } |
44 | 45 | ||
45 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 46 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
46 | { | 47 | { |
47 | if (machine_is_ag5evm()) | 48 | if (machine_is_ag5evm()) |
48 | return sh73a0_boot_secondary(cpu); | 49 | return sh73a0_boot_secondary(cpu); |
49 | 50 | ||
50 | return -ENOSYS; | 51 | return -ENOSYS; |
51 | } | 52 | } |
52 | 53 | ||
53 | void __init smp_init_cpus(void) | 54 | void __init smp_init_cpus(void) |
54 | { | 55 | { |
55 | unsigned int ncores = shmobile_smp_get_core_count(); | 56 | unsigned int ncores = shmobile_smp_get_core_count(); |
56 | unsigned int i; | 57 | unsigned int i; |
57 | 58 | ||
58 | for (i = 0; i < ncores; i++) | 59 | for (i = 0; i < ncores; i++) |
59 | set_cpu_possible(i, true); | 60 | set_cpu_possible(i, true); |
61 | |||
62 | set_smp_cross_call(gic_raise_softirq); | ||
60 | } | 63 | } |
61 | 64 | ||
62 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 65 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
63 | { | 66 | { |
64 | int i; | 67 | int i; |
65 | 68 | ||
66 | for (i = 0; i < max_cpus; i++) | 69 | for (i = 0; i < max_cpus; i++) |
67 | set_cpu_present(i, true); | 70 | set_cpu_present(i, true); |
68 | 71 | ||
69 | shmobile_smp_prepare_cpus(); | 72 | shmobile_smp_prepare_cpus(); |
70 | } | 73 | } |
71 | 74 |
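Note the asymmetry resolved here: the deleted shmobile header had to wrap the call in #if defined(CONFIG_ARM_GIC), whereas the new code registers gic_raise_softirq() unconditionally — acceptable because the only SMP machine handled (sh73a0 on ag5evm) is GIC-based. A platform where the GIC is genuinely optional could instead keep the distinction at registration time, e.g. (sketch):

	void __init smp_init_cpus(void)
	{
		/* ... */
	#if defined(CONFIG_ARM_GIC)
		set_smp_cross_call(gic_raise_softirq);	/* register only when a GIC exists */
	#endif
	}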
arch/arm/mach-tegra/include/mach/smp.h
1 | #ifndef ASMARM_ARCH_SMP_H | File was deleted | |
2 | #define ASMARM_ARCH_SMP_H | ||
3 | |||
4 | #include <asm/hardware/gic.h> | ||
5 | |||
6 | /* | ||
7 | * We use IRQ1 as the IPI | ||
8 | */ | ||
9 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
10 | { | ||
11 | gic_raise_softirq(mask, ipi); | ||
12 | } | ||
13 | |||
14 | #endif | ||
arch/arm/mach-tegra/platsmp.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/mach-tegra/platsmp.c | 2 | * linux/arch/arm/mach-tegra/platsmp.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002 ARM Ltd. | 4 | * Copyright (C) 2002 ARM Ltd. |
5 | * All Rights Reserved | 5 | * All Rights Reserved |
6 | * | 6 | * |
7 | * Copyright (C) 2009 Palm | 7 | * Copyright (C) 2009 Palm |
8 | * All Rights Reserved | 8 | * All Rights Reserved |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/jiffies.h> | 18 | #include <linux/jiffies.h> |
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | 21 | ||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/hardware/gic.h> | ||
23 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
24 | #include <asm/mach-types.h> | 25 | #include <asm/mach-types.h> |
25 | #include <asm/smp_scu.h> | 26 | #include <asm/smp_scu.h> |
26 | 27 | ||
27 | #include <mach/iomap.h> | 28 | #include <mach/iomap.h> |
28 | 29 | ||
29 | extern void tegra_secondary_startup(void); | 30 | extern void tegra_secondary_startup(void); |
30 | 31 | ||
31 | static DEFINE_SPINLOCK(boot_lock); | 32 | static DEFINE_SPINLOCK(boot_lock); |
32 | static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE); | 33 | static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE); |
33 | 34 | ||
34 | #define EVP_CPU_RESET_VECTOR \ | 35 | #define EVP_CPU_RESET_VECTOR \ |
35 | (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100) | 36 | (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100) |
36 | #define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \ | 37 | #define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \ |
37 | (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c) | 38 | (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c) |
38 | #define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \ | 39 | #define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \ |
39 | (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344) | 40 | (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344) |
40 | 41 | ||
41 | void __cpuinit platform_secondary_init(unsigned int cpu) | 42 | void __cpuinit platform_secondary_init(unsigned int cpu) |
42 | { | 43 | { |
43 | /* | 44 | /* |
44 | * if any interrupts are already enabled for the primary | 45 | * if any interrupts are already enabled for the primary |
45 | * core (e.g. timer irq), then they will not have been enabled | 46 | * core (e.g. timer irq), then they will not have been enabled |
46 | * for us: do so | 47 | * for us: do so |
47 | */ | 48 | */ |
48 | gic_secondary_init(0); | 49 | gic_secondary_init(0); |
49 | 50 | ||
50 | /* | 51 | /* |
51 | * Synchronise with the boot thread. | 52 | * Synchronise with the boot thread. |
52 | */ | 53 | */ |
53 | spin_lock(&boot_lock); | 54 | spin_lock(&boot_lock); |
54 | spin_unlock(&boot_lock); | 55 | spin_unlock(&boot_lock); |
55 | } | 56 | } |
56 | 57 | ||
57 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 58 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
58 | { | 59 | { |
59 | unsigned long old_boot_vector; | 60 | unsigned long old_boot_vector; |
60 | unsigned long boot_vector; | 61 | unsigned long boot_vector; |
61 | unsigned long timeout; | 62 | unsigned long timeout; |
62 | u32 reg; | 63 | u32 reg; |
63 | 64 | ||
64 | /* | 65 | /* |
65 | * set synchronisation state between this boot processor | 66 | * set synchronisation state between this boot processor |
66 | * and the secondary one | 67 | * and the secondary one |
67 | */ | 68 | */ |
68 | spin_lock(&boot_lock); | 69 | spin_lock(&boot_lock); |
69 | 70 | ||
70 | 71 | ||
71 | /* set the reset vector to point to the secondary_startup routine */ | 72 | /* set the reset vector to point to the secondary_startup routine */ |
72 | 73 | ||
73 | boot_vector = virt_to_phys(tegra_secondary_startup); | 74 | boot_vector = virt_to_phys(tegra_secondary_startup); |
74 | old_boot_vector = readl(EVP_CPU_RESET_VECTOR); | 75 | old_boot_vector = readl(EVP_CPU_RESET_VECTOR); |
75 | writel(boot_vector, EVP_CPU_RESET_VECTOR); | 76 | writel(boot_vector, EVP_CPU_RESET_VECTOR); |
76 | 77 | ||
77 | /* enable cpu clock on cpu1 */ | 78 | /* enable cpu clock on cpu1 */ |
78 | reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX); | 79 | reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX); |
79 | writel(reg & ~(1<<9), CLK_RST_CONTROLLER_CLK_CPU_CMPLX); | 80 | writel(reg & ~(1<<9), CLK_RST_CONTROLLER_CLK_CPU_CMPLX); |
80 | 81 | ||
81 | reg = (1<<13) | (1<<9) | (1<<5) | (1<<1); | 82 | reg = (1<<13) | (1<<9) | (1<<5) | (1<<1); |
82 | writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR); | 83 | writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR); |
83 | 84 | ||
84 | smp_wmb(); | 85 | smp_wmb(); |
85 | flush_cache_all(); | 86 | flush_cache_all(); |
86 | 87 | ||
87 | /* unhalt the cpu */ | 88 | /* unhalt the cpu */ |
88 | writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14); | 89 | writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14); |
89 | 90 | ||
90 | timeout = jiffies + (1 * HZ); | 91 | timeout = jiffies + (1 * HZ); |
91 | while (time_before(jiffies, timeout)) { | 92 | while (time_before(jiffies, timeout)) { |
92 | if (readl(EVP_CPU_RESET_VECTOR) != boot_vector) | 93 | if (readl(EVP_CPU_RESET_VECTOR) != boot_vector) |
93 | break; | 94 | break; |
94 | udelay(10); | 95 | udelay(10); |
95 | } | 96 | } |
96 | 97 | ||
97 | /* put the old boot vector back */ | 98 | /* put the old boot vector back */ |
98 | writel(old_boot_vector, EVP_CPU_RESET_VECTOR); | 99 | writel(old_boot_vector, EVP_CPU_RESET_VECTOR); |
99 | 100 | ||
100 | /* | 101 | /* |
101 | * now the secondary core is starting up let it run its | 102 | * now the secondary core is starting up let it run its |
102 | * calibrations, then wait for it to finish | 103 | * calibrations, then wait for it to finish |
103 | */ | 104 | */ |
104 | spin_unlock(&boot_lock); | 105 | spin_unlock(&boot_lock); |
105 | 106 | ||
106 | return 0; | 107 | return 0; |
107 | } | 108 | } |
108 | 109 | ||
109 | /* | 110 | /* |
110 | * Initialise the CPU possible map early - this describes the CPUs | 111 | * Initialise the CPU possible map early - this describes the CPUs |
111 | * which may be present or become present in the system. | 112 | * which may be present or become present in the system. |
112 | */ | 113 | */ |
113 | void __init smp_init_cpus(void) | 114 | void __init smp_init_cpus(void) |
114 | { | 115 | { |
115 | unsigned int i, ncores = scu_get_core_count(scu_base); | 116 | unsigned int i, ncores = scu_get_core_count(scu_base); |
116 | 117 | ||
117 | if (ncores > NR_CPUS) { | 118 | if (ncores > NR_CPUS) { |
118 | printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n", | 119 | printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n", |
119 | ncores, NR_CPUS); | 120 | ncores, NR_CPUS); |
120 | ncores = NR_CPUS; | 121 | ncores = NR_CPUS; |
121 | } | 122 | } |
122 | 123 | ||
123 | for (i = 0; i < ncores; i++) | 124 | for (i = 0; i < ncores; i++) |
124 | cpu_set(i, cpu_possible_map); | 125 | cpu_set(i, cpu_possible_map); |
126 | |||
127 | set_smp_cross_call(gic_raise_softirq); | ||
125 | } | 128 | } |
126 | 129 | ||
127 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 130 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
128 | { | 131 | { |
129 | int i; | 132 | int i; |
130 | 133 | ||
131 | /* | 134 | /* |
132 | * Initialise the present map, which describes the set of CPUs | 135 | * Initialise the present map, which describes the set of CPUs |
133 | * actually populated at the present time. | 136 | * actually populated at the present time. |
134 | */ | 137 | */ |
135 | for (i = 0; i < max_cpus; i++) | 138 | for (i = 0; i < max_cpus; i++) |
136 | set_cpu_present(i, true); | 139 | set_cpu_present(i, true); |
137 | 140 | ||
138 | scu_enable(scu_base); | 141 | scu_enable(scu_base); |
139 | } | 142 | } |
140 | 143 |
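One small inconsistency: tegra still fills the possible map through the legacy cpumask API (cpu_set() on cpu_possible_map), while every other platform in this commit uses the accessor form. The equivalent modern spelling, for reference, would be:

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);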
arch/arm/mach-ux500/include/mach/smp.h
1 | /* | File was deleted | |
2 | * This file is based on the ARM realview platform. | ||
3 | * Copyright (C) ARM Limited. | ||
4 | * | ||
5 | * This file is licensed under the terms of the GNU General Public | ||
6 | * License version 2. This program is licensed "as is" without any | ||
7 | * warranty of any kind, whether express or implied. | ||
8 | */ | ||
9 | #ifndef ASMARM_ARCH_SMP_H | ||
10 | #define ASMARM_ARCH_SMP_H | ||
11 | |||
12 | #include <asm/hardware/gic.h> | ||
13 | |||
14 | /* This is required to wakeup the secondary core */ | ||
15 | extern void u8500_secondary_startup(void); | ||
16 | |||
17 | /* | ||
18 | * We use IRQ1 as the IPI | ||
19 | */ | ||
20 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
21 | { | ||
22 | gic_raise_softirq(mask, ipi); | ||
23 | } | ||
24 | #endif | ||
arch/arm/mach-ux500/platsmp.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2002 ARM Ltd. | 2 | * Copyright (C) 2002 ARM Ltd. |
3 | * Copyright (C) 2008 STMicroelectronics. | 3 | * Copyright (C) 2008 STMicroelectronics. |
4 | * Copyright (C) 2009 ST-Ericsson. | 4 | * Copyright (C) 2009 ST-Ericsson. |
5 | * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> | 5 | * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> |
6 | * | 6 | * |
7 | * This file is based on the arm realview platform | 7 | * This file is based on the arm realview platform |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | 19 | ||
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <asm/hardware/gic.h> | ||
21 | #include <asm/smp_scu.h> | 22 | #include <asm/smp_scu.h> |
22 | #include <mach/hardware.h> | 23 | #include <mach/hardware.h> |
23 | #include <mach/setup.h> | 24 | #include <mach/setup.h> |
24 | 25 | ||
25 | /* | 26 | /* |
26 | * control for which core is the next to come out of the secondary | 27 | * control for which core is the next to come out of the secondary |
27 | * boot "holding pen" | 28 | * boot "holding pen" |
28 | */ | 29 | */ |
29 | volatile int pen_release = -1; | 30 | volatile int pen_release = -1; |
30 | 31 | ||
31 | /* | 32 | /* |
32 | * Write pen_release in a way that is guaranteed to be visible to all | 33 | * Write pen_release in a way that is guaranteed to be visible to all |
33 | * observers, irrespective of whether they're taking part in coherency | 34 | * observers, irrespective of whether they're taking part in coherency |
34 | * or not. This is necessary for the hotplug code to work reliably. | 35 | * or not. This is necessary for the hotplug code to work reliably. |
35 | */ | 36 | */ |
36 | static void write_pen_release(int val) | 37 | static void write_pen_release(int val) |
37 | { | 38 | { |
38 | pen_release = val; | 39 | pen_release = val; |
39 | smp_wmb(); | 40 | smp_wmb(); |
40 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); | 41 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); |
41 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); | 42 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); |
42 | } | 43 | } |
43 | 44 | ||
44 | static void __iomem *scu_base_addr(void) | 45 | static void __iomem *scu_base_addr(void) |
45 | { | 46 | { |
46 | if (cpu_is_u5500()) | 47 | if (cpu_is_u5500()) |
47 | return __io_address(U5500_SCU_BASE); | 48 | return __io_address(U5500_SCU_BASE); |
48 | else if (cpu_is_u8500()) | 49 | else if (cpu_is_u8500()) |
49 | return __io_address(U8500_SCU_BASE); | 50 | return __io_address(U8500_SCU_BASE); |
50 | else | 51 | else |
51 | ux500_unknown_soc(); | 52 | ux500_unknown_soc(); |
52 | 53 | ||
53 | return NULL; | 54 | return NULL; |
54 | } | 55 | } |
55 | 56 | ||
56 | static DEFINE_SPINLOCK(boot_lock); | 57 | static DEFINE_SPINLOCK(boot_lock); |
57 | 58 | ||
58 | void __cpuinit platform_secondary_init(unsigned int cpu) | 59 | void __cpuinit platform_secondary_init(unsigned int cpu) |
59 | { | 60 | { |
60 | /* | 61 | /* |
61 | * if any interrupts are already enabled for the primary | 62 | * if any interrupts are already enabled for the primary |
62 | * core (e.g. timer irq), then they will not have been enabled | 63 | * core (e.g. timer irq), then they will not have been enabled |
63 | * for us: do so | 64 | * for us: do so |
64 | */ | 65 | */ |
65 | gic_secondary_init(0); | 66 | gic_secondary_init(0); |
66 | 67 | ||
67 | /* | 68 | /* |
68 | * let the primary processor know we're out of the | 69 | * let the primary processor know we're out of the |
69 | * pen, then head off into the C entry point | 70 | * pen, then head off into the C entry point |
70 | */ | 71 | */ |
71 | write_pen_release(-1); | 72 | write_pen_release(-1); |
72 | 73 | ||
73 | /* | 74 | /* |
74 | * Synchronise with the boot thread. | 75 | * Synchronise with the boot thread. |
75 | */ | 76 | */ |
76 | spin_lock(&boot_lock); | 77 | spin_lock(&boot_lock); |
77 | spin_unlock(&boot_lock); | 78 | spin_unlock(&boot_lock); |
78 | } | 79 | } |
79 | 80 | ||
80 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 81 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
81 | { | 82 | { |
82 | unsigned long timeout; | 83 | unsigned long timeout; |
83 | 84 | ||
84 | /* | 85 | /* |
85 | * set synchronisation state between this boot processor | 86 | * set synchronisation state between this boot processor |
86 | * and the secondary one | 87 | * and the secondary one |
87 | */ | 88 | */ |
88 | spin_lock(&boot_lock); | 89 | spin_lock(&boot_lock); |
89 | 90 | ||
90 | /* | 91 | /* |
91 | * The secondary processor is waiting to be released from | 92 | * The secondary processor is waiting to be released from |
92 | * the holding pen - release it, then wait for it to flag | 93 | * the holding pen - release it, then wait for it to flag |
93 | * that it has been released by resetting pen_release. | 94 | * that it has been released by resetting pen_release. |
94 | */ | 95 | */ |
95 | write_pen_release(cpu); | 96 | write_pen_release(cpu); |
96 | 97 | ||
97 | smp_cross_call(cpumask_of(cpu), 1); | 98 | gic_raise_softirq(cpumask_of(cpu), 1); |
98 | 99 | ||
99 | timeout = jiffies + (1 * HZ); | 100 | timeout = jiffies + (1 * HZ); |
100 | while (time_before(jiffies, timeout)) { | 101 | while (time_before(jiffies, timeout)) { |
101 | if (pen_release == -1) | 102 | if (pen_release == -1) |
102 | break; | 103 | break; |
103 | } | 104 | } |
104 | 105 | ||
105 | /* | 106 | /* |
106 | * now the secondary core is starting up let it run its | 107 | * now the secondary core is starting up let it run its |
107 | * calibrations, then wait for it to finish | 108 | * calibrations, then wait for it to finish |
108 | */ | 109 | */ |
109 | spin_unlock(&boot_lock); | 110 | spin_unlock(&boot_lock); |
110 | 111 | ||
111 | return pen_release != -1 ? -ENOSYS : 0; | 112 | return pen_release != -1 ? -ENOSYS : 0; |
112 | } | 113 | } |
113 | 114 | ||
114 | static void __init wakeup_secondary(void) | 115 | static void __init wakeup_secondary(void) |
115 | { | 116 | { |
116 | void __iomem *backupram; | 117 | void __iomem *backupram; |
117 | 118 | ||
118 | if (cpu_is_u5500()) | 119 | if (cpu_is_u5500()) |
119 | backupram = __io_address(U5500_BACKUPRAM0_BASE); | 120 | backupram = __io_address(U5500_BACKUPRAM0_BASE); |
120 | else if (cpu_is_u8500()) | 121 | else if (cpu_is_u8500()) |
121 | backupram = __io_address(U8500_BACKUPRAM0_BASE); | 122 | backupram = __io_address(U8500_BACKUPRAM0_BASE); |
122 | else | 123 | else |
123 | ux500_unknown_soc(); | 124 | ux500_unknown_soc(); |
124 | 125 | ||
125 | /* | 126 | /* |
126 | * write the address of secondary startup into the backup ram register | 127 | * write the address of secondary startup into the backup ram register |
127 | * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the | 128 | * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the |
128 | * backup ram register at offset 0x1FF0, which is what boot rom code | 129 | * backup ram register at offset 0x1FF0, which is what boot rom code |
129 | * is waiting for. This would wake up the secondary core from WFE | 130 | * is waiting for. This would wake up the secondary core from WFE |
130 | */ | 131 | */ |
131 | #define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4 | 132 | #define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4 |
132 | __raw_writel(virt_to_phys(u8500_secondary_startup), | 133 | __raw_writel(virt_to_phys(u8500_secondary_startup), |
133 | backupram + UX500_CPU1_JUMPADDR_OFFSET); | 134 | backupram + UX500_CPU1_JUMPADDR_OFFSET); |
134 | 135 | ||
135 | #define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0 | 136 | #define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0 |
136 | __raw_writel(0xA1FEED01, | 137 | __raw_writel(0xA1FEED01, |
137 | backupram + UX500_CPU1_WAKEMAGIC_OFFSET); | 138 | backupram + UX500_CPU1_WAKEMAGIC_OFFSET); |
138 | 139 | ||
139 | /* make sure write buffer is drained */ | 140 | /* make sure write buffer is drained */ |
140 | mb(); | 141 | mb(); |
141 | } | 142 | } |
142 | 143 | ||
143 | /* | 144 | /* |
144 | * Initialise the CPU possible map early - this describes the CPUs | 145 | * Initialise the CPU possible map early - this describes the CPUs |
145 | * which may be present or become present in the system. | 146 | * which may be present or become present in the system. |
146 | */ | 147 | */ |
147 | void __init smp_init_cpus(void) | 148 | void __init smp_init_cpus(void) |
148 | { | 149 | { |
149 | void __iomem *scu_base = scu_base_addr(); | 150 | void __iomem *scu_base = scu_base_addr(); |
150 | unsigned int i, ncores; | 151 | unsigned int i, ncores; |
151 | 152 | ||
152 | ncores = scu_base ? scu_get_core_count(scu_base) : 1; | 153 | ncores = scu_base ? scu_get_core_count(scu_base) : 1; |
153 | 154 | ||
154 | /* sanity check */ | 155 | /* sanity check */ |
155 | if (ncores > NR_CPUS) { | 156 | if (ncores > NR_CPUS) { |
156 | printk(KERN_WARNING | 157 | printk(KERN_WARNING |
157 | "U8500: no. of cores (%d) greater than configured " | 158 | "U8500: no. of cores (%d) greater than configured " |
158 | "maximum of %d - clipping\n", | 159 | "maximum of %d - clipping\n", |
159 | ncores, NR_CPUS); | 160 | ncores, NR_CPUS); |
160 | ncores = NR_CPUS; | 161 | ncores = NR_CPUS; |
161 | } | 162 | } |
162 | 163 | ||
163 | for (i = 0; i < ncores; i++) | 164 | for (i = 0; i < ncores; i++) |
164 | set_cpu_possible(i, true); | 165 | set_cpu_possible(i, true); |
166 | |||
167 | set_smp_cross_call(gic_raise_softirq); | ||
165 | } | 168 | } |
166 | 169 | ||
167 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 170 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
168 | { | 171 | { |
169 | int i; | 172 | int i; |
170 | 173 | ||
171 | /* | 174 | /* |
172 | * Initialise the present map, which describes the set of CPUs | 175 | * Initialise the present map, which describes the set of CPUs |
173 | * actually populated at the present time. | 176 | * actually populated at the present time. |
174 | */ | 177 | */ |
175 | for (i = 0; i < max_cpus; i++) | 178 | for (i = 0; i < max_cpus; i++) |
176 | set_cpu_present(i, true); | 179 | set_cpu_present(i, true); |
177 | 180 | ||
178 | scu_enable(scu_base_addr()); | 181 | scu_enable(scu_base_addr()); |
179 | wakeup_secondary(); | 182 | wakeup_secondary(); |
180 | } | 183 | } |
181 | 184 |
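Two details set the ux500 hunk apart from the msm one above: pen_release maintenance is centralised in write_pen_release(), which performs the smp_wmb() and cache cleaning that msm open-codes, and the wait loop polls pen_release with neither a read barrier nor a delay. A more conservative loop, mirroring msm, might read (sketch):

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();	/* pairs with smp_wmb() in write_pen_release() */
		if (pen_release == -1)
			break;
		udelay(10);
	}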
arch/arm/mach-vexpress/ct-ca9x4.c
1 | /* | 1 | /* |
2 | * Versatile Express Core Tile Cortex A9x4 Support | 2 | * Versatile Express Core Tile Cortex A9x4 Support |
3 | */ | 3 | */ |
4 | #include <linux/init.h> | 4 | #include <linux/init.h> |
5 | #include <linux/gfp.h> | 5 | #include <linux/gfp.h> |
6 | #include <linux/device.h> | 6 | #include <linux/device.h> |
7 | #include <linux/dma-mapping.h> | 7 | #include <linux/dma-mapping.h> |
8 | #include <linux/platform_device.h> | 8 | #include <linux/platform_device.h> |
9 | #include <linux/amba/bus.h> | 9 | #include <linux/amba/bus.h> |
10 | #include <linux/amba/clcd.h> | 10 | #include <linux/amba/clcd.h> |
11 | #include <linux/clkdev.h> | 11 | #include <linux/clkdev.h> |
12 | 12 | ||
13 | #include <asm/hardware/arm_timer.h> | 13 | #include <asm/hardware/arm_timer.h> |
14 | #include <asm/hardware/cache-l2x0.h> | 14 | #include <asm/hardware/cache-l2x0.h> |
15 | #include <asm/hardware/gic.h> | 15 | #include <asm/hardware/gic.h> |
16 | #include <asm/pmu.h> | 16 | #include <asm/pmu.h> |
17 | #include <asm/smp_scu.h> | 17 | #include <asm/smp_scu.h> |
18 | #include <asm/smp_twd.h> | 18 | #include <asm/smp_twd.h> |
19 | 19 | ||
20 | #include <mach/ct-ca9x4.h> | 20 | #include <mach/ct-ca9x4.h> |
21 | 21 | ||
22 | #include <asm/hardware/timer-sp.h> | 22 | #include <asm/hardware/timer-sp.h> |
23 | 23 | ||
24 | #include <asm/mach/map.h> | 24 | #include <asm/mach/map.h> |
25 | #include <asm/mach/time.h> | 25 | #include <asm/mach/time.h> |
26 | 26 | ||
27 | #include "core.h" | 27 | #include "core.h" |
28 | 28 | ||
29 | #include <mach/motherboard.h> | 29 | #include <mach/motherboard.h> |
30 | 30 | ||
31 | #include <plat/clcd.h> | 31 | #include <plat/clcd.h> |
32 | 32 | ||
33 | #define V2M_PA_CS7 0x10000000 | 33 | #define V2M_PA_CS7 0x10000000 |
34 | 34 | ||
35 | static struct map_desc ct_ca9x4_io_desc[] __initdata = { | 35 | static struct map_desc ct_ca9x4_io_desc[] __initdata = { |
36 | { | 36 | { |
37 | .virtual = __MMIO_P2V(CT_CA9X4_MPIC), | 37 | .virtual = __MMIO_P2V(CT_CA9X4_MPIC), |
38 | .pfn = __phys_to_pfn(CT_CA9X4_MPIC), | 38 | .pfn = __phys_to_pfn(CT_CA9X4_MPIC), |
39 | .length = SZ_16K, | 39 | .length = SZ_16K, |
40 | .type = MT_DEVICE, | 40 | .type = MT_DEVICE, |
41 | }, { | 41 | }, { |
42 | .virtual = __MMIO_P2V(CT_CA9X4_SP804_TIMER), | 42 | .virtual = __MMIO_P2V(CT_CA9X4_SP804_TIMER), |
43 | .pfn = __phys_to_pfn(CT_CA9X4_SP804_TIMER), | 43 | .pfn = __phys_to_pfn(CT_CA9X4_SP804_TIMER), |
44 | .length = SZ_4K, | 44 | .length = SZ_4K, |
45 | .type = MT_DEVICE, | 45 | .type = MT_DEVICE, |
46 | }, { | 46 | }, { |
47 | .virtual = __MMIO_P2V(CT_CA9X4_L2CC), | 47 | .virtual = __MMIO_P2V(CT_CA9X4_L2CC), |
48 | .pfn = __phys_to_pfn(CT_CA9X4_L2CC), | 48 | .pfn = __phys_to_pfn(CT_CA9X4_L2CC), |
49 | .length = SZ_4K, | 49 | .length = SZ_4K, |
50 | .type = MT_DEVICE, | 50 | .type = MT_DEVICE, |
51 | }, | 51 | }, |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static void __init ct_ca9x4_map_io(void) | 54 | static void __init ct_ca9x4_map_io(void) |
55 | { | 55 | { |
56 | #ifdef CONFIG_LOCAL_TIMERS | 56 | #ifdef CONFIG_LOCAL_TIMERS |
57 | twd_base = MMIO_P2V(A9_MPCORE_TWD); | 57 | twd_base = MMIO_P2V(A9_MPCORE_TWD); |
58 | #endif | 58 | #endif |
59 | iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); | 59 | iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); |
60 | } | 60 | } |
61 | 61 | ||
62 | static void __init ct_ca9x4_init_irq(void) | 62 | static void __init ct_ca9x4_init_irq(void) |
63 | { | 63 | { |
64 | gic_init(0, 29, MMIO_P2V(A9_MPCORE_GIC_DIST), | 64 | gic_init(0, 29, MMIO_P2V(A9_MPCORE_GIC_DIST), |
65 | MMIO_P2V(A9_MPCORE_GIC_CPU)); | 65 | MMIO_P2V(A9_MPCORE_GIC_CPU)); |
66 | } | 66 | } |
67 | 67 | ||
68 | #if 0 | 68 | #if 0 |
69 | static void __init ct_ca9x4_timer_init(void) | 69 | static void __init ct_ca9x4_timer_init(void) |
70 | { | 70 | { |
71 | writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL); | 71 | writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL); |
72 | writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL); | 72 | writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL); |
73 | 73 | ||
74 | sp804_clocksource_init(MMIO_P2V(CT_CA9X4_TIMER1)); | 74 | sp804_clocksource_init(MMIO_P2V(CT_CA9X4_TIMER1)); |
75 | sp804_clockevents_init(MMIO_P2V(CT_CA9X4_TIMER0), IRQ_CT_CA9X4_TIMER0); | 75 | sp804_clockevents_init(MMIO_P2V(CT_CA9X4_TIMER0), IRQ_CT_CA9X4_TIMER0); |
76 | } | 76 | } |
77 | 77 | ||
78 | static struct sys_timer ct_ca9x4_timer = { | 78 | static struct sys_timer ct_ca9x4_timer = { |
79 | .init = ct_ca9x4_timer_init, | 79 | .init = ct_ca9x4_timer_init, |
80 | }; | 80 | }; |
81 | #endif | 81 | #endif |
82 | 82 | ||
83 | static void ct_ca9x4_clcd_enable(struct clcd_fb *fb) | 83 | static void ct_ca9x4_clcd_enable(struct clcd_fb *fb) |
84 | { | 84 | { |
85 | v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0); | 85 | v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0); |
86 | v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2); | 86 | v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2); |
87 | } | 87 | } |
88 | 88 | ||
89 | static int ct_ca9x4_clcd_setup(struct clcd_fb *fb) | 89 | static int ct_ca9x4_clcd_setup(struct clcd_fb *fb) |
90 | { | 90 | { |
91 | unsigned long framesize = 1024 * 768 * 2; | 91 | unsigned long framesize = 1024 * 768 * 2; |
92 | 92 | ||
93 | fb->panel = versatile_clcd_get_panel("XVGA"); | 93 | fb->panel = versatile_clcd_get_panel("XVGA"); |
94 | if (!fb->panel) | 94 | if (!fb->panel) |
95 | return -EINVAL; | 95 | return -EINVAL; |
96 | 96 | ||
97 | return versatile_clcd_setup_dma(fb, framesize); | 97 | return versatile_clcd_setup_dma(fb, framesize); |
98 | } | 98 | } |
99 | 99 | ||
100 | static struct clcd_board ct_ca9x4_clcd_data = { | 100 | static struct clcd_board ct_ca9x4_clcd_data = { |
101 | .name = "CT-CA9X4", | 101 | .name = "CT-CA9X4", |
102 | .caps = CLCD_CAP_5551 | CLCD_CAP_565, | 102 | .caps = CLCD_CAP_5551 | CLCD_CAP_565, |
103 | .check = clcdfb_check, | 103 | .check = clcdfb_check, |
104 | .decode = clcdfb_decode, | 104 | .decode = clcdfb_decode, |
105 | .enable = ct_ca9x4_clcd_enable, | 105 | .enable = ct_ca9x4_clcd_enable, |
106 | .setup = ct_ca9x4_clcd_setup, | 106 | .setup = ct_ca9x4_clcd_setup, |
107 | .mmap = versatile_clcd_mmap_dma, | 107 | .mmap = versatile_clcd_mmap_dma, |
108 | .remove = versatile_clcd_remove_dma, | 108 | .remove = versatile_clcd_remove_dma, |
109 | }; | 109 | }; |
110 | 110 | ||
111 | static AMBA_DEVICE(clcd, "ct:clcd", CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data); | 111 | static AMBA_DEVICE(clcd, "ct:clcd", CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data); |
112 | static AMBA_DEVICE(dmc, "ct:dmc", CT_CA9X4_DMC, NULL); | 112 | static AMBA_DEVICE(dmc, "ct:dmc", CT_CA9X4_DMC, NULL); |
113 | static AMBA_DEVICE(smc, "ct:smc", CT_CA9X4_SMC, NULL); | 113 | static AMBA_DEVICE(smc, "ct:smc", CT_CA9X4_SMC, NULL); |
114 | static AMBA_DEVICE(gpio, "ct:gpio", CT_CA9X4_GPIO, NULL); | 114 | static AMBA_DEVICE(gpio, "ct:gpio", CT_CA9X4_GPIO, NULL); |
115 | 115 | ||
116 | static struct amba_device *ct_ca9x4_amba_devs[] __initdata = { | 116 | static struct amba_device *ct_ca9x4_amba_devs[] __initdata = { |
117 | &clcd_device, | 117 | &clcd_device, |
118 | &dmc_device, | 118 | &dmc_device, |
119 | &smc_device, | 119 | &smc_device, |
120 | &gpio_device, | 120 | &gpio_device, |
121 | }; | 121 | }; |
122 | 122 | ||
123 | 123 | ||
124 | static long ct_round(struct clk *clk, unsigned long rate) | 124 | static long ct_round(struct clk *clk, unsigned long rate) |
125 | { | 125 | { |
126 | return rate; | 126 | return rate; |
127 | } | 127 | } |
128 | 128 | ||
129 | static int ct_set(struct clk *clk, unsigned long rate) | 129 | static int ct_set(struct clk *clk, unsigned long rate) |
130 | { | 130 | { |
131 | return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate); | 131 | return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate); |
132 | } | 132 | } |
133 | 133 | ||
134 | static const struct clk_ops osc1_clk_ops = { | 134 | static const struct clk_ops osc1_clk_ops = { |
135 | .round = ct_round, | 135 | .round = ct_round, |
136 | .set = ct_set, | 136 | .set = ct_set, |
137 | }; | 137 | }; |
138 | 138 | ||
139 | static struct clk osc1_clk = { | 139 | static struct clk osc1_clk = { |
140 | .ops = &osc1_clk_ops, | 140 | .ops = &osc1_clk_ops, |
141 | .rate = 24000000, | 141 | .rate = 24000000, |
142 | }; | 142 | }; |
143 | 143 | ||
144 | static struct clk_lookup lookups[] = { | 144 | static struct clk_lookup lookups[] = { |
145 | { /* CLCD */ | 145 | { /* CLCD */ |
146 | .dev_id = "ct:clcd", | 146 | .dev_id = "ct:clcd", |
147 | .clk = &osc1_clk, | 147 | .clk = &osc1_clk, |
148 | }, | 148 | }, |
149 | }; | 149 | }; |
150 | 150 | ||
151 | static struct resource pmu_resources[] = { | 151 | static struct resource pmu_resources[] = { |
152 | [0] = { | 152 | [0] = { |
153 | .start = IRQ_CT_CA9X4_PMU_CPU0, | 153 | .start = IRQ_CT_CA9X4_PMU_CPU0, |
154 | .end = IRQ_CT_CA9X4_PMU_CPU0, | 154 | .end = IRQ_CT_CA9X4_PMU_CPU0, |
155 | .flags = IORESOURCE_IRQ, | 155 | .flags = IORESOURCE_IRQ, |
156 | }, | 156 | }, |
157 | [1] = { | 157 | [1] = { |
158 | .start = IRQ_CT_CA9X4_PMU_CPU1, | 158 | .start = IRQ_CT_CA9X4_PMU_CPU1, |
159 | .end = IRQ_CT_CA9X4_PMU_CPU1, | 159 | .end = IRQ_CT_CA9X4_PMU_CPU1, |
160 | .flags = IORESOURCE_IRQ, | 160 | .flags = IORESOURCE_IRQ, |
161 | }, | 161 | }, |
162 | [2] = { | 162 | [2] = { |
163 | .start = IRQ_CT_CA9X4_PMU_CPU2, | 163 | .start = IRQ_CT_CA9X4_PMU_CPU2, |
164 | .end = IRQ_CT_CA9X4_PMU_CPU2, | 164 | .end = IRQ_CT_CA9X4_PMU_CPU2, |
165 | .flags = IORESOURCE_IRQ, | 165 | .flags = IORESOURCE_IRQ, |
166 | }, | 166 | }, |
167 | [3] = { | 167 | [3] = { |
168 | .start = IRQ_CT_CA9X4_PMU_CPU3, | 168 | .start = IRQ_CT_CA9X4_PMU_CPU3, |
169 | .end = IRQ_CT_CA9X4_PMU_CPU3, | 169 | .end = IRQ_CT_CA9X4_PMU_CPU3, |
170 | .flags = IORESOURCE_IRQ, | 170 | .flags = IORESOURCE_IRQ, |
171 | }, | 171 | }, |
172 | }; | 172 | }; |
173 | 173 | ||
174 | static struct platform_device pmu_device = { | 174 | static struct platform_device pmu_device = { |
175 | .name = "arm-pmu", | 175 | .name = "arm-pmu", |
176 | .id = ARM_PMU_DEVICE_CPU, | 176 | .id = ARM_PMU_DEVICE_CPU, |
177 | .num_resources = ARRAY_SIZE(pmu_resources), | 177 | .num_resources = ARRAY_SIZE(pmu_resources), |
178 | .resource = pmu_resources, | 178 | .resource = pmu_resources, |
179 | }; | 179 | }; |
180 | 180 | ||
181 | static void __init ct_ca9x4_init_early(void) | 181 | static void __init ct_ca9x4_init_early(void) |
182 | { | 182 | { |
183 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); | 183 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void __init ct_ca9x4_init(void) | 186 | static void __init ct_ca9x4_init(void) |
187 | { | 187 | { |
188 | int i; | 188 | int i; |
189 | 189 | ||
190 | #ifdef CONFIG_CACHE_L2X0 | 190 | #ifdef CONFIG_CACHE_L2X0 |
191 | void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); | 191 | void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); |
192 | 192 | ||
193 | /* set RAM latencies to 1 cycle for this core tile. */ | 193 | /* set RAM latencies to 1 cycle for this core tile. */ |
194 | writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); | 194 | writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); |
195 | writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); | 195 | writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); |
196 | 196 | ||
197 | l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); | 197 | l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); |
198 | #endif | 198 | #endif |
199 | 199 | ||
200 | for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++) | 200 | for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++) |
201 | amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); | 201 | amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); |
202 | 202 | ||
203 | platform_device_register(&pmu_device); | 203 | platform_device_register(&pmu_device); |
204 | } | 204 | } |
205 | 205 | ||
206 | #ifdef CONFIG_SMP | 206 | #ifdef CONFIG_SMP |
207 | static void ct_ca9x4_init_cpu_map(void) | 207 | static void ct_ca9x4_init_cpu_map(void) |
208 | { | 208 | { |
209 | int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU)); | 209 | int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU)); |
210 | 210 | ||
211 | for (i = 0; i < ncores; ++i) | 211 | for (i = 0; i < ncores; ++i) |
212 | set_cpu_possible(i, true); | 212 | set_cpu_possible(i, true); |
213 | |||
214 | set_smp_cross_call(gic_raise_softirq); | ||
213 | } | 215 | } |
214 | 216 | ||
215 | static void ct_ca9x4_smp_enable(unsigned int max_cpus) | 217 | static void ct_ca9x4_smp_enable(unsigned int max_cpus) |
216 | { | 218 | { |
217 | int i; | 219 | int i; |
218 | for (i = 0; i < max_cpus; i++) | 220 | for (i = 0; i < max_cpus; i++) |
219 | set_cpu_present(i, true); | 221 | set_cpu_present(i, true); |
220 | 222 | ||
221 | scu_enable(MMIO_P2V(A9_MPCORE_SCU)); | 223 | scu_enable(MMIO_P2V(A9_MPCORE_SCU)); |
222 | } | 224 | } |
223 | #endif | 225 | #endif |
224 | 226 | ||
225 | struct ct_desc ct_ca9x4_desc __initdata = { | 227 | struct ct_desc ct_ca9x4_desc __initdata = { |
226 | .id = V2M_CT_ID_CA9, | 228 | .id = V2M_CT_ID_CA9, |
227 | .name = "CA9x4", | 229 | .name = "CA9x4", |
228 | .map_io = ct_ca9x4_map_io, | 230 | .map_io = ct_ca9x4_map_io, |
229 | .init_early = ct_ca9x4_init_early, | 231 | .init_early = ct_ca9x4_init_early, |
230 | .init_irq = ct_ca9x4_init_irq, | 232 | .init_irq = ct_ca9x4_init_irq, |
231 | .init_tile = ct_ca9x4_init, | 233 | .init_tile = ct_ca9x4_init, |
232 | #ifdef CONFIG_SMP | 234 | #ifdef CONFIG_SMP |
233 | .init_cpu_map = ct_ca9x4_init_cpu_map, | 235 | .init_cpu_map = ct_ca9x4_init_cpu_map, |
234 | .smp_enable = ct_ca9x4_smp_enable, | 236 | .smp_enable = ct_ca9x4_smp_enable, |
235 | #endif | 237 | #endif |
236 | }; | 238 | }; |
237 | 239 |
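
The set_smp_cross_call(gic_raise_softirq) registration added above (new line 214) is the per-platform half of this consolidation. The core-side half lives in arch/arm/kernel/smp.c and is not shown in this hunk, so the following is only a sketch, assuming the signature implied by gic_raise_softirq():

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	/* record the platform's IPI-raising function for the core IPI code */
	smp_cross_call = fn;
}
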
arch/arm/mach-vexpress/include/mach/smp.h
1 | #ifndef __MACH_SMP_H | File was deleted | |
2 | #define __MACH_SMP_H | ||
3 | |||
4 | #include <asm/hardware/gic.h> | ||
5 | |||
6 | /* | ||
7 | * We use IRQ1 as the IPI | ||
8 | */ | ||
9 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
10 | { | ||
11 | gic_raise_softirq(mask, ipi); | ||
12 | } | ||
13 | #endif | ||
14 |
arch/arm/plat-omap/include/plat/smp.h
1 | /* | File was deleted | |
2 | * OMAP4 machine specific smp.h | ||
3 | * | ||
4 | * Copyright (C) 2009 Texas Instruments, Inc. | ||
5 | * | ||
6 | * Author: | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * Interface functions needed for the SMP. This file is based on arm | ||
10 | * realview smp platform. | ||
11 | * Copyright (c) 2003 ARM Limited. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License version 2 as | ||
15 | * published by the Free Software Foundation. | ||
16 | */ | ||
17 | #ifndef OMAP_ARCH_SMP_H | ||
18 | #define OMAP_ARCH_SMP_H | ||
19 | |||
20 | #include <asm/hardware/gic.h> | ||
21 | |||
22 | /* | ||
23 | * We use Soft IRQ1 as the IPI | ||
24 | */ | ||
25 | static inline void smp_cross_call(const struct cpumask *mask, int ipi) | ||
26 | { | ||
27 | gic_raise_softirq(mask, ipi); | ||
28 | } | ||
29 | |||
30 | #endif | ||
31 |
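
Both headers deleted above supplied the same one-line inline wrapper around gic_raise_softirq(). After this commit, a platform instead registers that function once while initialising its CPU map, as ct_ca9x4_init_cpu_map() does earlier in this diff. A sketch of the pattern for a hypothetical GIC-based platform (all myplat_* names are illustrative, not from this commit):

#include <linux/init.h>
#include <linux/smp.h>
#include <asm/hardware/gic.h>

void __init myplat_smp_init_cpus(void)
{
	int i, ncores = myplat_get_core_count();	/* assumed platform helper */

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	/* replaces the deleted mach/smp.h smp_cross_call() wrapper */
	set_smp_cross_call(gic_raise_softirq);
}
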
arch/arm/plat-versatile/platsmp.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/plat-versatile/platsmp.c | 2 | * linux/arch/arm/plat-versatile/platsmp.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002 ARM Ltd. | 4 | * Copyright (C) 2002 ARM Ltd. |
5 | * All Rights Reserved | 5 | * All Rights Reserved |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/jiffies.h> | 15 | #include <linux/jiffies.h> |
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | 17 | ||
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/hardware/gic.h> | ||
19 | 20 | ||
20 | /* | 21 | /* |
21 | * control for which core is the next to come out of the secondary | 22 | * control for which core is the next to come out of the secondary |
22 | * boot "holding pen" | 23 | * boot "holding pen" |
23 | */ | 24 | */ |
24 | volatile int __cpuinitdata pen_release = -1; | 25 | volatile int __cpuinitdata pen_release = -1; |
25 | 26 | ||
26 | /* | 27 | /* |
27 | * Write pen_release in a way that is guaranteed to be visible to all | 28 | * Write pen_release in a way that is guaranteed to be visible to all |
28 | * observers, irrespective of whether they're taking part in coherency | 29 | * observers, irrespective of whether they're taking part in coherency |
29 | * or not. This is necessary for the hotplug code to work reliably. | 30 | * or not. This is necessary for the hotplug code to work reliably. |
30 | */ | 31 | */ |
31 | static void __cpuinit write_pen_release(int val) | 32 | static void __cpuinit write_pen_release(int val) |
32 | { | 33 | { |
33 | pen_release = val; | 34 | pen_release = val; |
34 | smp_wmb(); | 35 | smp_wmb(); |
35 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); | 36 | __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); |
36 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); | 37 | outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); |
37 | } | 38 | } |
38 | 39 | ||
39 | static DEFINE_SPINLOCK(boot_lock); | 40 | static DEFINE_SPINLOCK(boot_lock); |
40 | 41 | ||
41 | void __cpuinit platform_secondary_init(unsigned int cpu) | 42 | void __cpuinit platform_secondary_init(unsigned int cpu) |
42 | { | 43 | { |
43 | /* | 44 | /* |
44 | * if any interrupts are already enabled for the primary | 45 | * if any interrupts are already enabled for the primary |
45 | * core (e.g. timer irq), then they will not have been enabled | 46 | * core (e.g. timer irq), then they will not have been enabled |
46 | * for us: do so | 47 | * for us: do so |
47 | */ | 48 | */ |
48 | gic_secondary_init(0); | 49 | gic_secondary_init(0); |
49 | 50 | ||
50 | /* | 51 | /* |
51 | * let the primary processor know we're out of the | 52 | * let the primary processor know we're out of the |
52 | * pen, then head off into the C entry point | 53 | * pen, then head off into the C entry point |
53 | */ | 54 | */ |
54 | write_pen_release(-1); | 55 | write_pen_release(-1); |
55 | 56 | ||
56 | /* | 57 | /* |
57 | * Synchronise with the boot thread. | 58 | * Synchronise with the boot thread. |
58 | */ | 59 | */ |
59 | spin_lock(&boot_lock); | 60 | spin_lock(&boot_lock); |
60 | spin_unlock(&boot_lock); | 61 | spin_unlock(&boot_lock); |
61 | } | 62 | } |
62 | 63 | ||
63 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 64 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
64 | { | 65 | { |
65 | unsigned long timeout; | 66 | unsigned long timeout; |
66 | 67 | ||
67 | /* | 68 | /* |
68 | * Set synchronisation state between this boot processor | 69 | * Set synchronisation state between this boot processor |
69 | * and the secondary one | 70 | * and the secondary one |
70 | */ | 71 | */ |
71 | spin_lock(&boot_lock); | 72 | spin_lock(&boot_lock); |
72 | 73 | ||
73 | /* | 74 | /* |
74 | * This is really belt and braces; we hold unintended secondary | 75 | * This is really belt and braces; we hold unintended secondary |
75 | * CPUs in the holding pen until we're ready for them. However, | 76 | * CPUs in the holding pen until we're ready for them. However, |
76 | * since we haven't sent them a soft interrupt, they shouldn't | 77 | * since we haven't sent them a soft interrupt, they shouldn't |
77 | * be there. | 78 | * be there. |
78 | */ | 79 | */ |
79 | write_pen_release(cpu); | 80 | write_pen_release(cpu); |
80 | 81 | ||
81 | /* | 82 | /* |
82 | * Send the secondary CPU a soft interrupt, thereby causing | 83 | * Send the secondary CPU a soft interrupt, thereby causing |
83 | * the boot monitor to read the system wide flags register, | 84 | * the boot monitor to read the system wide flags register, |
84 | * and branch to the address found there. | 85 | * and branch to the address found there. |
85 | */ | 86 | */ |
86 | smp_cross_call(cpumask_of(cpu), 1); | 87 | gic_raise_softirq(cpumask_of(cpu), 1); |
87 | 88 | ||
88 | timeout = jiffies + (1 * HZ); | 89 | timeout = jiffies + (1 * HZ); |
89 | while (time_before(jiffies, timeout)) { | 90 | while (time_before(jiffies, timeout)) { |
90 | smp_rmb(); | 91 | smp_rmb(); |
91 | if (pen_release == -1) | 92 | if (pen_release == -1) |
92 | break; | 93 | break; |
93 | 94 | ||
94 | udelay(10); | 95 | udelay(10); |
95 | } | 96 | } |
96 | 97 | ||
97 | /* | 98 | /* |
98 | * now the secondary core is starting up let it run its | 99 | * now the secondary core is starting up let it run its |
99 | * calibrations, then wait for it to finish | 100 | * calibrations, then wait for it to finish |
100 | */ | 101 | */ |
101 | spin_unlock(&boot_lock); | 102 | spin_unlock(&boot_lock); |
102 | 103 | ||
103 | return pen_release != -1 ? -ENOSYS : 0; | 104 | return pen_release != -1 ? -ENOSYS : 0; |
104 | } | 105 | } |
105 | 106 |
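
Note that boot_secondary() above now calls gic_raise_softirq() directly (new line 87) rather than the removed smp_cross_call() wrapper, presumably because this platform code already knows its IPIs are GIC-based and needs no indirection. For context, the secondary CPU's side of the pen_release handshake lives in each platform's headsmp.S assembly, which this diff does not touch; in C terms it is roughly:

/*
 * Rough C rendering of the holding-pen spin; the real code is assembly
 * and polls pen_release before the secondary joins coherency.
 */
while (pen_release != smp_processor_id())
	;	/* wait for the boot CPU to write our CPU number */
/* then fall through to secondary startup and platform_secondary_init() */
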