Commit fe45736f4134b9656c656ac5e15b915192f2704a
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "The usual random collection of relatively small ARM fixes"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8063/1: bL_switcher: fix individual online status reporting of removed CPUs
  ARM: 8064/1: fix v7-M signal return
  ARM: 8057/1: amba: Add Qualcomm vendor ID.
  ARM: 8052/1: unwind: Fix handling of "Pop r4-r[4+nnn],r14" opcode
  ARM: 8051/1: put_user: fix possible data corruption in put_user
  ARM: 8048/1: fix v7-M setup stack location
Showing 6 changed files
/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:	Nicolas Pitre, March 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}
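
/*
 * A worked example, for illustration (assuming a typical dual-cluster
 * big.LITTLE system): the MPIDR packs the CPU number within its cluster
 * in affinity level 0 and the cluster number in affinity level 1, so a
 * raw value of 0x101 decodes as MPIDR_AFFINITY_LEVEL(0x101, 0) == 1
 * (CPU 1) and MPIDR_AFFINITY_LEVEL(0x101, 1) == 1 (cluster 1).
 */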

/*
 * Get a global nanosecond time stamp for tracing.
 */
static s64 get_ns(void)
{
	struct timespec ts;
	getnstimeofday(&ts);
	return timespec_to_ns(&ts);
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
	unsigned ib_mpidr, ib_cpu, ib_cluster;
	long volatile handshake, **handshake_ptr = _arg;

	pr_debug("%s\n", __func__);

	ib_mpidr = cpu_logical_map(smp_processor_id());
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	/* Advertise our handshake location */
	if (handshake_ptr) {
		handshake = 0;
		*handshake_ptr = &handshake;
	} else
		handshake = -1;

	/*
	 * Our state has been saved at this point.  Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend().  It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting.  This is why we need stack isolation.
	 *
	 * Fancy under cover tasks could be performed here.  For now
	 * we have none.
	 */

	/*
	 * Let's wait until our inbound is alive.
	 */
	while (!handshake) {
		wfe();
		smp_mb();
	}

	/* Let's put ourself down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}
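
/*
 * To illustrate the arithmetic above (a sketch, assuming two clusters):
 * the first L1_CACHE_BYTES-aligned address past thread_info is the base;
 * cluster 0 gets bytes [base, base + 512) and cluster 1 gets
 * [base + 512, base + 1024).  Since ARM stacks grow downwards, the
 * pointer handed to call_with_stack() is the *top* of the selected slot,
 * i.e. base + clusterid * STACK_SIZE + STACK_SIZE.
 */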

/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, this_cpu, that_cpu;
	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
	struct completion inbound_alive;
	struct tick_device *tdev;
	enum clock_event_mode tdev_mode;
	long volatile *handshake_ptr;
	int ipi_nr, ret;

	this_cpu = smp_processor_id();
	ob_mpidr = read_mpidr();
	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

	if (new_cluster_id == ob_cluster)
		return 0;

	that_cpu = bL_switcher_cpu_pairing[this_cpu];
	ib_mpidr = cpu_logical_map(that_cpu);
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
		 this_cpu, ob_mpidr, ib_mpidr);

	this_cpu = smp_processor_id();

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

	/* Install our "inbound alive" notifier. */
	init_completion(&inbound_alive);
	ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
	ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
	mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * Raise a SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in bL_power_down().
	 */
	gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

	/*
	 * Wait for the inbound to come up.  This allows for other
	 * tasks to be scheduled in the mean time.
	 */
	wait_for_completion(&inbound_alive);
	mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();
	trace_cpu_migrate_begin(get_ns(), ob_mpidr);

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

	tdev = tick_get_device(this_cpu);
	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
		tdev = NULL;
	if (tdev) {
		tdev_mode = tdev->evtdev->mode;
		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	}

	ret = cpu_pm_enter();

	/* we can not tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Swap the physical CPUs in the logical map for this logical CPU. */
	cpu_logical_map(this_cpu) = ib_mpidr;
	cpu_logical_map(that_cpu) = ob_mpidr;

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
	BUG_ON(mpidr != ib_mpidr);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	if (tdev) {
		clockevents_set_mode(tdev->evtdev, tdev_mode);
		clockevents_program_event(tdev->evtdev,
					  tdev->evtdev->next_event, 1);
	}

	trace_cpu_migrate_finish(get_ns(), ib_mpidr);
	local_fiq_enable();
	local_irq_enable();

	*handshake_ptr = 1;
	dsb_sev();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}
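
/*
 * A note on the handshake protocol as implemented above and in
 * bL_do_switch(): cpu_suspend() passes &handshake_ptr down to
 * bL_do_switch(), which runs on the outbound CPU's isolated stack and
 * publishes the address of its on-stack 'handshake' word.  Once the
 * inbound CPU resumes here past cpu_suspend(), it writes 1 through that
 * pointer and issues dsb_sev(), releasing the outbound CPU from its
 * wfe() loop so it can finally call mcpm_cpu_power_down().
 */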

struct bL_thread {
	spinlock_t lock;
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
	bL_switch_completion_handler completer;
	void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;
	bL_switch_completion_handler completer;
	void *completer_cookie;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());

		spin_lock(&t->lock);
		cluster = t->wanted_cluster;
		completer = t->completer;
		completer_cookie = t->completer_cookie;
		t->wanted_cluster = -1;
		t->completer = NULL;
		spin_unlock(&t->lock);

		if (cluster != -1) {
			bL_switch_to(cluster);

			if (completer)
				completer(completer_cookie);
		}
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *      with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  if non-NULL,
 *      @completer(@completer_cookie) will be called on completion of
 *      the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
			 bL_switch_completion_handler completer,
			 void *completer_cookie)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];

	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	spin_lock(&t->lock);
	if (t->completer) {
		spin_unlock(&t->lock);
		return -EBUSY;
	}
	t->completer = completer;
	t->completer_cookie = completer_cookie;
	t->wanted_cluster = new_cluster_id;
	spin_unlock(&t->lock);
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
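
/*
 * Usage sketch (a hypothetical caller, for illustration only; the callback
 * name and the CPU/cluster numbers are made up): a client that wants to
 * switch CPU 0 to cluster 1 and block until the switch has completed can
 * pair the completer callback with a completion:
 *
 *	static void my_switch_done(void *cookie)
 *	{
 *		complete(cookie);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	if (!bL_switch_request_cb(0, 1, my_switch_done, &done))
 *		wait_for_completion(&done);
 */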

/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

static int bL_activation_notify(unsigned long val)
{
	int ret;

	ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
	if (ret & NOTIFY_STOP_MASK)
		pr_err("%s: notifier chain failed with status 0x%x\n",
			__func__, ret);
	return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
	int i;

-	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
-		cpu_up(i);
+	for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
+		struct device *cpu_dev = get_cpu_device(i);
+		int ret = device_online(cpu_dev);
+		if (ret)
+			dev_err(cpu_dev, "switcher: unable to restore CPU\n");
+	}
}

static int bL_switcher_halve_cpus(void)
{
	int i, j, cluster_0, gic_id, ret;
	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
	mask = 0;
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
			return -EINVAL;
		mask |= (1 << cluster);
	}
	if (mask != 3) {
		pr_err("%s: no CPU pairing possible\n", __func__);
		return -EINVAL;
	}

	/*
	 * Now let's do the pairing.  We match each CPU with another CPU
	 * from a different cluster.  To get a uniform scheduling behavior
	 * without fiddling with CPU topology and compute capacity data,
	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
	cluster_0 = -1;
	for_each_cpu(i, &available_cpus) {
		int match = -1;
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster_0 == -1)
			cluster_0 = cluster;
		if (cluster != cluster_0)
			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
			/*
			 * Let's remember the last match to create "odd"
			 * pairings on purpose in order for other code not
			 * to assume any relation between physical and
			 * logical CPU numbers.
			 */
			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {
			bL_switcher_cpu_pairing[i] = match;
			cpumask_clear_cpu(match, &available_cpus);
			pr_info("CPU%d paired with CPU%d\n", i, match);
		}
	}

	/*
	 * Now we disable the unwanted CPUs i.e. everything that has no
	 * pairing information (that includes the pairing counterparts).
	 */
	cpumask_clear(&bL_switcher_removed_logical_cpus);
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

		/* Let's take note of the GIC ID for this CPU */
		gic_id = gic_get_cpu_id(i);
		if (gic_id < 0) {
			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
			bL_switcher_restore_cpus();
			return -EINVAL;
		}
		bL_gic_id[cpu][cluster] = gic_id;
		pr_info("GIC ID for CPU %u cluster %u is %u\n",
			cpu, cluster, gic_id);

		if (bL_switcher_cpu_pairing[i] != -1) {
			bL_switcher_cpu_original_cluster[i] = cluster;
			continue;
		}

-		ret = cpu_down(i);
+		ret = device_offline(get_cpu_device(i));
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}
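
/*
 * Worked example of the pairing pass above (an illustrative topology, not
 * from this commit): with four online logical CPUs where CPUs 0-1 sit on
 * cluster 0 and CPUs 2-3 on cluster 1, the inner loop keeps the *last*
 * cross-cluster candidate, so CPU0 pairs with CPU3 and CPU1 with CPU2.
 * The deliberately "odd" pairing ensures nothing else can rely on a fixed
 * physical/logical correspondence.
 */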

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
	int cpu;

	if (!bL_switcher_active)
		return -EUNATCH;

	mpidr &= MPIDR_HWID_BITMASK;
	for_each_online_cpu(cpu) {
		int pairing = bL_switcher_cpu_pairing[cpu];
		if (pairing == -1)
			continue;
		if ((mpidr == cpu_logical_map(cpu)) ||
		    (mpidr == cpu_logical_map(pairing)))
			return cpu;
	}
	return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
	trace_cpu_migrate_current(get_ns(), read_mpidr());
}

int bL_switcher_trace_trigger(void)
{
	int ret;

	preempt_disable();

	bL_switcher_trace_trigger_cpu(NULL);
	ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
	int cpu, ret;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();
	if (bL_switcher_active) {
		unlock_device_hotplug();
		mutex_unlock(&bL_switcher_activation_lock);
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
	if (ret)
		goto error;

	ret = bL_switcher_halve_cpus();
	if (ret)
		goto error;

	bL_switcher_trace_trigger();

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		spin_lock_init(&t->lock);
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	bL_activation_notify(BL_NOTIFY_POST_ENABLE);
	pr_info("big.LITTLE switcher initialized\n");
	goto out;

error:
	pr_warn("big.LITTLE switcher initialization failed\n");
	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
	return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster;
	struct bL_thread *t;
	struct task_struct *task;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();

	if (!bL_switcher_active)
		goto out;

	if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
		bL_activation_notify(BL_NOTIFY_POST_ENABLE);
		goto out;
	}

	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching the CPU back without any
	 * possibility for interference from external requests.
	 */
	for_each_online_cpu(cpu) {
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}
		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		pr_crit("%s: CPU %d can't be restored\n",
			__func__, bL_switcher_cpu_pairing[cpu]);
		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
				  &bL_switcher_removed_logical_cpus);
	}

	bL_switcher_restore_cpus();
	bL_switcher_trace_trigger();

	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;

	switch (buf[0]) {
	case '0':
		bL_switcher_disable();
		ret = 0;
		break;
	case '1':
		ret = bL_switcher_enable();
		break;
	default:
		ret = -EINVAL;
	}

	return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = bL_switcher_trace_trigger();

	return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
	__ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
	&bL_switcher_active_attr.attr,
	&bL_switcher_trace_trigger_attr.attr,
	NULL,
};

static struct attribute_group bL_switcher_attr_group = {
	.attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
	int ret;

	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
	if (!bL_switcher_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
	if (ret)
		kobject_put(bL_switcher_kobj);
	return ret;
}
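
/*
 * Since the kobject is created under kernel_kobj, the control files land
 * in /sys/kernel/bL_switcher/.  For example, from a root shell:
 *
 *	echo 0 > /sys/kernel/bL_switcher/active		# disable, restore CPUs
 *	echo 1 > /sys/kernel/bL_switcher/active		# re-enable
 *	echo 1 > /sys/kernel/bL_switcher/trace_trigger	# trace current MPIDRs
 */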

#endif /* CONFIG_SYSFS */

bool bL_switcher_get_enabled(void)
{
	mutex_lock(&bL_switcher_activation_lock);

	return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
	mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	if (bL_switcher_active) {
		int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
		switch (action & 0xf) {
		case CPU_UP_PREPARE:
		case CPU_DOWN_PREPARE:
			if (pairing == -1)
				return NOTIFY_BAD;
		}
	}
	return NOTIFY_DONE;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
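
/*
 * core_param() registers "no_bL_switcher" as an un-prefixed kernel
 * parameter, so booting with no_bL_switcher=1 on the command line keeps
 * all CPUs online and skips bL_switcher_enable() below; given the 0644
 * mode it should also appear read-write under
 * /sys/module/kernel/parameters/ at runtime.
 */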
795 | 799 | ||
796 | static int __init bL_switcher_init(void) | 800 | static int __init bL_switcher_init(void) |
797 | { | 801 | { |
798 | int ret; | 802 | int ret; |
799 | 803 | ||
800 | if (!mcpm_is_available()) | 804 | if (!mcpm_is_available()) |
801 | return -ENODEV; | 805 | return -ENODEV; |
802 | 806 | ||
803 | cpu_notifier(bL_switcher_hotplug_callback, 0); | 807 | cpu_notifier(bL_switcher_hotplug_callback, 0); |
804 | 808 | ||
805 | if (!no_bL_switcher) { | 809 | if (!no_bL_switcher) { |
806 | ret = bL_switcher_enable(); | 810 | ret = bL_switcher_enable(); |
807 | if (ret) | 811 | if (ret) |
808 | return ret; | 812 | return ret; |
809 | } | 813 | } |
810 | 814 | ||
811 | #ifdef CONFIG_SYSFS | 815 | #ifdef CONFIG_SYSFS |
812 | ret = bL_switcher_sysfs_init(); | 816 | ret = bL_switcher_sysfs_init(); |
813 | if (ret) | 817 | if (ret) |
814 | pr_err("%s: unable to create sysfs entry\n", __func__); | 818 | pr_err("%s: unable to create sysfs entry\n", __func__); |
815 | #endif | 819 | #endif |
816 | 820 | ||
817 | return 0; | 821 | return 0; |
818 | } | 822 | } |
819 | 823 | ||
820 | late_initcall(bL_switcher_init); | 824 | late_initcall(bL_switcher_init); |
821 | 825 |
1 | /* | 1 | /* |
2 | * arch/arm/include/asm/uaccess.h | 2 | * arch/arm/include/asm/uaccess.h |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #ifndef _ASMARM_UACCESS_H | 8 | #ifndef _ASMARM_UACCESS_H |
9 | #define _ASMARM_UACCESS_H | 9 | #define _ASMARM_UACCESS_H |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * User space memory access functions | 12 | * User space memory access functions |
13 | */ | 13 | */ |
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/thread_info.h> | 15 | #include <linux/thread_info.h> |
16 | #include <asm/errno.h> | 16 | #include <asm/errno.h> |
17 | #include <asm/memory.h> | 17 | #include <asm/memory.h> |
18 | #include <asm/domain.h> | 18 | #include <asm/domain.h> |
19 | #include <asm/unified.h> | 19 | #include <asm/unified.h> |
20 | #include <asm/compiler.h> | 20 | #include <asm/compiler.h> |
21 | 21 | ||
22 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 22 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
23 | #include <asm-generic/uaccess-unaligned.h> | 23 | #include <asm-generic/uaccess-unaligned.h> |
24 | #else | 24 | #else |
25 | #define __get_user_unaligned __get_user | 25 | #define __get_user_unaligned __get_user |
26 | #define __put_user_unaligned __put_user | 26 | #define __put_user_unaligned __put_user |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | #define VERIFY_READ 0 | 29 | #define VERIFY_READ 0 |
30 | #define VERIFY_WRITE 1 | 30 | #define VERIFY_WRITE 1 |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * The exception table consists of pairs of addresses: the first is the | 33 | * The exception table consists of pairs of addresses: the first is the |
34 | * address of an instruction that is allowed to fault, and the second is | 34 | * address of an instruction that is allowed to fault, and the second is |
35 | * the address at which the program should continue. No registers are | 35 | * the address at which the program should continue. No registers are |
36 | * modified, so it is entirely up to the continuation code to figure out | 36 | * modified, so it is entirely up to the continuation code to figure out |
37 | * what to do. | 37 | * what to do. |
38 | * | 38 | * |
39 | * All the routines below use bits of fixup code that are out of line | 39 | * All the routines below use bits of fixup code that are out of line |
40 | * with the main instruction path. This means when everything is well, | 40 | * with the main instruction path. This means when everything is well, |
41 | * we don't even have to jump over them. Further, they do not intrude | 41 | * we don't even have to jump over them. Further, they do not intrude |
42 | * on our cache or tlb entries. | 42 | * on our cache or tlb entries. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | struct exception_table_entry | 45 | struct exception_table_entry |
46 | { | 46 | { |
47 | unsigned long insn, fixup; | 47 | unsigned long insn, fixup; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | extern int fixup_exception(struct pt_regs *regs); | 50 | extern int fixup_exception(struct pt_regs *regs); |
51 | 51 | ||
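The table declared above is what fixup_exception() consults when an instruction marked as allowed to fault actually faults: the faulting address is looked up and, on a hit, execution resumes at the paired fixup address. A minimal sketch of that lookup, assuming the entries are sorted by faulting-instruction address (find_fixup() is a hypothetical name; the in-tree search_extable() is the real implementation):

    static const struct exception_table_entry *
    find_fixup(const struct exception_table_entry *first,
               const struct exception_table_entry *last,
               unsigned long faulting_pc)
    {
            while (first <= last) {
                    const struct exception_table_entry *mid =
                            first + (last - first) / 2;

                    if (faulting_pc < mid->insn)
                            last = mid - 1;
                    else if (faulting_pc > mid->insn)
                            first = mid + 1;
                    else
                            return mid;     /* continue at mid->fixup */
            }
            return NULL;                    /* genuine fault */
    }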
52 | /* | 52 | /* |
53 | * These two are intentionally not defined anywhere - if the kernel | 53 | * These two are intentionally not defined anywhere - if the kernel |
54 | * code generates any references to them, that's a bug. | 54 | * code generates any references to them, that's a bug. |
55 | */ | 55 | */ |
56 | extern int __get_user_bad(void); | 56 | extern int __get_user_bad(void); |
57 | extern int __put_user_bad(void); | 57 | extern int __put_user_bad(void); |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Note that this is actually 0x1,0000,0000 (2^32), which wraps to 0 | 60 | * Note that this is actually 0x1,0000,0000 (2^32), which wraps to 0 |
61 | */ | 61 | */ |
62 | #define KERNEL_DS 0x00000000 | 62 | #define KERNEL_DS 0x00000000 |
63 | #define get_ds() (KERNEL_DS) | 63 | #define get_ds() (KERNEL_DS) |
64 | 64 | ||
65 | #ifdef CONFIG_MMU | 65 | #ifdef CONFIG_MMU |
66 | 66 | ||
67 | #define USER_DS TASK_SIZE | 67 | #define USER_DS TASK_SIZE |
68 | #define get_fs() (current_thread_info()->addr_limit) | 68 | #define get_fs() (current_thread_info()->addr_limit) |
69 | 69 | ||
70 | static inline void set_fs(mm_segment_t fs) | 70 | static inline void set_fs(mm_segment_t fs) |
71 | { | 71 | { |
72 | current_thread_info()->addr_limit = fs; | 72 | current_thread_info()->addr_limit = fs; |
73 | modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); | 73 | modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); |
74 | } | 74 | } |
75 | 75 | ||
76 | #define segment_eq(a,b) ((a) == (b)) | 76 | #define segment_eq(a,b) ((a) == (b)) |
77 | 77 | ||
78 | #define __addr_ok(addr) ({ \ | 78 | #define __addr_ok(addr) ({ \ |
79 | unsigned long flag; \ | 79 | unsigned long flag; \ |
80 | __asm__("cmp %2, %0; movlo %0, #0" \ | 80 | __asm__("cmp %2, %0; movlo %0, #0" \ |
81 | : "=&r" (flag) \ | 81 | : "=&r" (flag) \ |
82 | : "0" (current_thread_info()->addr_limit), "r" (addr) \ | 82 | : "0" (current_thread_info()->addr_limit), "r" (addr) \ |
83 | : "cc"); \ | 83 | : "cc"); \ |
84 | (flag == 0); }) | 84 | (flag == 0); }) |
85 | 85 | ||
86 | /* We use 33-bit arithmetic here... */ | 86 | /* We use 33-bit arithmetic here... */ |
87 | #define __range_ok(addr,size) ({ \ | 87 | #define __range_ok(addr,size) ({ \ |
88 | unsigned long flag, roksum; \ | 88 | unsigned long flag, roksum; \ |
89 | __chk_user_ptr(addr); \ | 89 | __chk_user_ptr(addr); \ |
90 | __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ | 90 | __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ |
91 | : "=&r" (flag), "=&r" (roksum) \ | 91 | : "=&r" (flag), "=&r" (roksum) \ |
92 | : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \ | 92 | : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \ |
93 | : "cc"); \ | 93 | : "cc"); \ |
94 | flag; }) | 94 | flag; }) |
95 | 95 | ||
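The 33-bit arithmetic matters for two reasons: addr + size can carry out of 32 bits, and KERNEL_DS is really 2^32 stored as 0, so for the kernel segment every range passes. A plain-C sketch of what the two instructions compute, written from a reading of the asm above (0 means OK, matching __range_ok()):

    static inline int range_ok_sketch(unsigned long addr, unsigned long size,
                                      unsigned long limit)
    {
            if (limit == 0)         /* KERNEL_DS: the limit is really 2^32 */
                    return 0;
            return ((unsigned long long)addr + size <= limit) ? 0 : 1;
    }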
96 | /* | 96 | /* |
97 | * Single-value transfer routines. They automatically use the right | 97 | * Single-value transfer routines. They automatically use the right |
98 | * size if we just have the right pointer type. Note that the functions | 98 | * size if we just have the right pointer type. Note that the functions |
99 | * which read from user space (*get_*) need to take care not to leak | 99 | * which read from user space (*get_*) need to take care not to leak |
100 | * kernel data even if the calling code is buggy and fails to check | 100 | * kernel data even if the calling code is buggy and fails to check |
101 | * the return value. This means zeroing out the destination variable | 101 | * the return value. This means zeroing out the destination variable |
102 | * or buffer on error. Normally this is done out of line by the | 102 | * or buffer on error. Normally this is done out of line by the |
103 | * fixup code, but there are a few places where it intrudes on the | 103 | * fixup code, but there are a few places where it intrudes on the |
104 | * main code path. When we only write to user space, there is no | 104 | * main code path. When we only write to user space, there is no |
105 | * problem. | 105 | * problem. |
106 | */ | 106 | */ |
107 | extern int __get_user_1(void *); | 107 | extern int __get_user_1(void *); |
108 | extern int __get_user_2(void *); | 108 | extern int __get_user_2(void *); |
109 | extern int __get_user_4(void *); | 109 | extern int __get_user_4(void *); |
110 | 110 | ||
111 | #define __GUP_CLOBBER_1 "lr", "cc" | 111 | #define __GUP_CLOBBER_1 "lr", "cc" |
112 | #ifdef CONFIG_CPU_USE_DOMAINS | 112 | #ifdef CONFIG_CPU_USE_DOMAINS |
113 | #define __GUP_CLOBBER_2 "ip", "lr", "cc" | 113 | #define __GUP_CLOBBER_2 "ip", "lr", "cc" |
114 | #else | 114 | #else |
115 | #define __GUP_CLOBBER_2 "lr", "cc" | 115 | #define __GUP_CLOBBER_2 "lr", "cc" |
116 | #endif | 116 | #endif |
117 | #define __GUP_CLOBBER_4 "lr", "cc" | 117 | #define __GUP_CLOBBER_4 "lr", "cc" |
118 | 118 | ||
119 | #define __get_user_x(__r2,__p,__e,__l,__s) \ | 119 | #define __get_user_x(__r2,__p,__e,__l,__s) \ |
120 | __asm__ __volatile__ ( \ | 120 | __asm__ __volatile__ ( \ |
121 | __asmeq("%0", "r0") __asmeq("%1", "r2") \ | 121 | __asmeq("%0", "r0") __asmeq("%1", "r2") \ |
122 | __asmeq("%3", "r1") \ | 122 | __asmeq("%3", "r1") \ |
123 | "bl __get_user_" #__s \ | 123 | "bl __get_user_" #__s \ |
124 | : "=&r" (__e), "=r" (__r2) \ | 124 | : "=&r" (__e), "=r" (__r2) \ |
125 | : "0" (__p), "r" (__l) \ | 125 | : "0" (__p), "r" (__l) \ |
126 | : __GUP_CLOBBER_##__s) | 126 | : __GUP_CLOBBER_##__s) |
127 | 127 | ||
128 | #define __get_user_check(x,p) \ | 128 | #define __get_user_check(x,p) \ |
129 | ({ \ | 129 | ({ \ |
130 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ | 130 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ |
131 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ | 131 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ |
132 | register unsigned long __r2 asm("r2"); \ | 132 | register unsigned long __r2 asm("r2"); \ |
133 | register unsigned long __l asm("r1") = __limit; \ | 133 | register unsigned long __l asm("r1") = __limit; \ |
134 | register int __e asm("r0"); \ | 134 | register int __e asm("r0"); \ |
135 | switch (sizeof(*(__p))) { \ | 135 | switch (sizeof(*(__p))) { \ |
136 | case 1: \ | 136 | case 1: \ |
137 | __get_user_x(__r2, __p, __e, __l, 1); \ | 137 | __get_user_x(__r2, __p, __e, __l, 1); \ |
138 | break; \ | 138 | break; \ |
139 | case 2: \ | 139 | case 2: \ |
140 | __get_user_x(__r2, __p, __e, __l, 2); \ | 140 | __get_user_x(__r2, __p, __e, __l, 2); \ |
141 | break; \ | 141 | break; \ |
142 | case 4: \ | 142 | case 4: \ |
143 | __get_user_x(__r2, __p, __e, __l, 4); \ | 143 | __get_user_x(__r2, __p, __e, __l, 4); \ |
144 | break; \ | 144 | break; \ |
145 | default: __e = __get_user_bad(); break; \ | 145 | default: __e = __get_user_bad(); break; \ |
146 | } \ | 146 | } \ |
147 | x = (typeof(*(p))) __r2; \ | 147 | x = (typeof(*(p))) __r2; \ |
148 | __e; \ | 148 | __e; \ |
149 | }) | 149 | }) |
150 | 150 | ||
151 | #define get_user(x,p) \ | 151 | #define get_user(x,p) \ |
152 | ({ \ | 152 | ({ \ |
153 | might_fault(); \ | 153 | might_fault(); \ |
154 | __get_user_check(x,p); \ | 154 | __get_user_check(x,p); \ |
155 | }) | 155 | }) |
156 | 156 | ||
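get_user() therefore both range-checks the pointer against addr_limit and, via the fixup code, zeroes the destination on a fault, so a caller only has to test the return value. A usage sketch (example_read_flag() is a hypothetical handler):

    static int example_read_flag(int __user *uptr, int *out)
    {
            int val;

            if (get_user(val, uptr))        /* -EFAULT on a bad pointer */
                    return -EFAULT;         /* val was zeroed, nothing leaked */
            *out = val;
            return 0;
    }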
157 | extern int __put_user_1(void *, unsigned int); | 157 | extern int __put_user_1(void *, unsigned int); |
158 | extern int __put_user_2(void *, unsigned int); | 158 | extern int __put_user_2(void *, unsigned int); |
159 | extern int __put_user_4(void *, unsigned int); | 159 | extern int __put_user_4(void *, unsigned int); |
160 | extern int __put_user_8(void *, unsigned long long); | 160 | extern int __put_user_8(void *, unsigned long long); |
161 | 161 | ||
162 | #define __put_user_x(__r2,__p,__e,__l,__s) \ | 162 | #define __put_user_x(__r2,__p,__e,__l,__s) \ |
163 | __asm__ __volatile__ ( \ | 163 | __asm__ __volatile__ ( \ |
164 | __asmeq("%0", "r0") __asmeq("%2", "r2") \ | 164 | __asmeq("%0", "r0") __asmeq("%2", "r2") \ |
165 | __asmeq("%3", "r1") \ | 165 | __asmeq("%3", "r1") \ |
166 | "bl __put_user_" #__s \ | 166 | "bl __put_user_" #__s \ |
167 | : "=&r" (__e) \ | 167 | : "=&r" (__e) \ |
168 | : "0" (__p), "r" (__r2), "r" (__l) \ | 168 | : "0" (__p), "r" (__r2), "r" (__l) \ |
169 | : "ip", "lr", "cc") | 169 | : "ip", "lr", "cc") |
170 | 170 | ||
171 | #define __put_user_check(x,p) \ | 171 | #define __put_user_check(x,p) \ |
172 | ({ \ | 172 | ({ \ |
173 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ | 173 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ |
174 | const typeof(*(p)) __user *__tmp_p = (p); \ | ||
174 | register const typeof(*(p)) __r2 asm("r2") = (x); \ | 175 | register const typeof(*(p)) __r2 asm("r2") = (x); \ |
175 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ | 176 | register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ |
176 | register unsigned long __l asm("r1") = __limit; \ | 177 | register unsigned long __l asm("r1") = __limit; \ |
177 | register int __e asm("r0"); \ | 178 | register int __e asm("r0"); \ |
178 | switch (sizeof(*(__p))) { \ | 179 | switch (sizeof(*(__p))) { \ |
179 | case 1: \ | 180 | case 1: \ |
180 | __put_user_x(__r2, __p, __e, __l, 1); \ | 181 | __put_user_x(__r2, __p, __e, __l, 1); \ |
181 | break; \ | 182 | break; \ |
182 | case 2: \ | 183 | case 2: \ |
183 | __put_user_x(__r2, __p, __e, __l, 2); \ | 184 | __put_user_x(__r2, __p, __e, __l, 2); \ |
184 | break; \ | 185 | break; \ |
185 | case 4: \ | 186 | case 4: \ |
186 | __put_user_x(__r2, __p, __e, __l, 4); \ | 187 | __put_user_x(__r2, __p, __e, __l, 4); \ |
187 | break; \ | 188 | break; \ |
188 | case 8: \ | 189 | case 8: \ |
189 | __put_user_x(__r2, __p, __e, __l, 8); \ | 190 | __put_user_x(__r2, __p, __e, __l, 8); \ |
190 | break; \ | 191 | break; \ |
191 | default: __e = __put_user_bad(); break; \ | 192 | default: __e = __put_user_bad(); break; \ |
192 | } \ | 193 | } \ |
193 | __e; \ | 194 | __e; \ |
194 | }) | 195 | }) |
195 | 196 | ||
196 | #define put_user(x,p) \ | 197 | #define put_user(x,p) \ |
197 | ({ \ | 198 | ({ \ |
198 | might_fault(); \ | 199 | might_fault(); \ |
199 | __put_user_check(x,p); \ | 200 | __put_user_check(x,p); \ |
200 | }) | 201 | }) |
201 | 202 | ||
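The new __tmp_p temporary in __put_user_check() above fixes an evaluation-ordering hazard: it forces the pointer expression (p) to be evaluated before the value is bound to the fixed register r2. Previously, a pointer expression that required a function call could be evaluated after __r2 was already live, and the call would clobber r2 (caller-saved under the AAPCS) before __put_user_x() ran. A sketch of the kind of caller that is sensitive to this (compute_slot() is a hypothetical helper, for illustration only):

    extern int __user *compute_slot(int __user *base, int idx);

    static int example_store(int __user *base, int idx, int val)
    {
            /* evaluating the pointer needs a call, which may clobber
             * r0-r3; put_user() must sequence it before binding val */
            return put_user(val, compute_slot(base, idx));
    }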
202 | #else /* CONFIG_MMU */ | 203 | #else /* CONFIG_MMU */ |
203 | 204 | ||
204 | /* | 205 | /* |
205 | * uClinux has only one address space, so it has simplified address limits. | 206 | * uClinux has only one address space, so it has simplified address limits. |
206 | */ | 207 | */ |
207 | #define USER_DS KERNEL_DS | 208 | #define USER_DS KERNEL_DS |
208 | 209 | ||
209 | #define segment_eq(a,b) (1) | 210 | #define segment_eq(a,b) (1) |
210 | #define __addr_ok(addr) ((void)(addr),1) | 211 | #define __addr_ok(addr) ((void)(addr),1) |
211 | #define __range_ok(addr,size) ((void)(addr),0) | 212 | #define __range_ok(addr,size) ((void)(addr),0) |
212 | #define get_fs() (KERNEL_DS) | 213 | #define get_fs() (KERNEL_DS) |
213 | 214 | ||
214 | static inline void set_fs(mm_segment_t fs) | 215 | static inline void set_fs(mm_segment_t fs) |
215 | { | 216 | { |
216 | } | 217 | } |
217 | 218 | ||
218 | #define get_user(x,p) __get_user(x,p) | 219 | #define get_user(x,p) __get_user(x,p) |
219 | #define put_user(x,p) __put_user(x,p) | 220 | #define put_user(x,p) __put_user(x,p) |
220 | 221 | ||
221 | #endif /* CONFIG_MMU */ | 222 | #endif /* CONFIG_MMU */ |
222 | 223 | ||
223 | #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) | 224 | #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) |
224 | 225 | ||
225 | #define user_addr_max() \ | 226 | #define user_addr_max() \ |
226 | (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) | 227 | (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) |
227 | 228 | ||
228 | /* | 229 | /* |
229 | * The "__xxx" versions of the user access functions do not verify the | 230 | * The "__xxx" versions of the user access functions do not verify the |
230 | * address space - it must have been done previously with a separate | 231 | * address space - it must have been done previously with a separate |
231 | * "access_ok()" call. | 232 | * "access_ok()" call. |
232 | * | 233 | * |
233 | * The "xxx_error" versions set the third argument to EFAULT if an | 234 | * The "xxx_error" versions set the third argument to EFAULT if an |
234 | * error occurs, and leave it unchanged on success. Note that these | 235 | * error occurs, and leave it unchanged on success. Note that these |
235 | * versions are void (ie, don't return a value as such). | 236 | * versions are void (ie, don't return a value as such). |
236 | */ | 237 | */ |
237 | #define __get_user(x,ptr) \ | 238 | #define __get_user(x,ptr) \ |
238 | ({ \ | 239 | ({ \ |
239 | long __gu_err = 0; \ | 240 | long __gu_err = 0; \ |
240 | __get_user_err((x),(ptr),__gu_err); \ | 241 | __get_user_err((x),(ptr),__gu_err); \ |
241 | __gu_err; \ | 242 | __gu_err; \ |
242 | }) | 243 | }) |
243 | 244 | ||
244 | #define __get_user_error(x,ptr,err) \ | 245 | #define __get_user_error(x,ptr,err) \ |
245 | ({ \ | 246 | ({ \ |
246 | __get_user_err((x),(ptr),err); \ | 247 | __get_user_err((x),(ptr),err); \ |
247 | (void) 0; \ | 248 | (void) 0; \ |
248 | }) | 249 | }) |
249 | 250 | ||
250 | #define __get_user_err(x,ptr,err) \ | 251 | #define __get_user_err(x,ptr,err) \ |
251 | do { \ | 252 | do { \ |
252 | unsigned long __gu_addr = (unsigned long)(ptr); \ | 253 | unsigned long __gu_addr = (unsigned long)(ptr); \ |
253 | unsigned long __gu_val; \ | 254 | unsigned long __gu_val; \ |
254 | __chk_user_ptr(ptr); \ | 255 | __chk_user_ptr(ptr); \ |
255 | might_fault(); \ | 256 | might_fault(); \ |
256 | switch (sizeof(*(ptr))) { \ | 257 | switch (sizeof(*(ptr))) { \ |
257 | case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ | 258 | case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ |
258 | case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ | 259 | case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ |
259 | case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ | 260 | case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ |
260 | default: (__gu_val) = __get_user_bad(); \ | 261 | default: (__gu_val) = __get_user_bad(); \ |
261 | } \ | 262 | } \ |
262 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 263 | (x) = (__typeof__(*(ptr)))__gu_val; \ |
263 | } while (0) | 264 | } while (0) |
264 | 265 | ||
265 | #define __get_user_asm_byte(x,addr,err) \ | 266 | #define __get_user_asm_byte(x,addr,err) \ |
266 | __asm__ __volatile__( \ | 267 | __asm__ __volatile__( \ |
267 | "1: " TUSER(ldrb) " %1,[%2],#0\n" \ | 268 | "1: " TUSER(ldrb) " %1,[%2],#0\n" \ |
268 | "2:\n" \ | 269 | "2:\n" \ |
269 | " .pushsection .fixup,\"ax\"\n" \ | 270 | " .pushsection .fixup,\"ax\"\n" \ |
270 | " .align 2\n" \ | 271 | " .align 2\n" \ |
271 | "3: mov %0, %3\n" \ | 272 | "3: mov %0, %3\n" \ |
272 | " mov %1, #0\n" \ | 273 | " mov %1, #0\n" \ |
273 | " b 2b\n" \ | 274 | " b 2b\n" \ |
274 | " .popsection\n" \ | 275 | " .popsection\n" \ |
275 | " .pushsection __ex_table,\"a\"\n" \ | 276 | " .pushsection __ex_table,\"a\"\n" \ |
276 | " .align 3\n" \ | 277 | " .align 3\n" \ |
277 | " .long 1b, 3b\n" \ | 278 | " .long 1b, 3b\n" \ |
278 | " .popsection" \ | 279 | " .popsection" \ |
279 | : "+r" (err), "=&r" (x) \ | 280 | : "+r" (err), "=&r" (x) \ |
280 | : "r" (addr), "i" (-EFAULT) \ | 281 | : "r" (addr), "i" (-EFAULT) \ |
281 | : "cc") | 282 | : "cc") |
282 | 283 | ||
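In __get_user_asm_byte() the load at label 1 is allowed to fault; the out-of-line code at label 3 reports -EFAULT and zeroes the destination, and the __ex_table entry pairs the two addresses. The net effect, rendered as plain C for illustration (the real redirection happens through the exception table, not a flag):

    static int get_user_byte_sketch(unsigned char *dst,
                                    const unsigned char *src, int faulted)
    {
            if (faulted) {          /* stand-in for the MMU fault + fixup */
                    *dst = 0;       /* never leak stale kernel data */
                    return -EFAULT;
            }
            *dst = *src;
            return 0;
    }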
283 | #ifndef __ARMEB__ | 284 | #ifndef __ARMEB__ |
284 | #define __get_user_asm_half(x,__gu_addr,err) \ | 285 | #define __get_user_asm_half(x,__gu_addr,err) \ |
285 | ({ \ | 286 | ({ \ |
286 | unsigned long __b1, __b2; \ | 287 | unsigned long __b1, __b2; \ |
287 | __get_user_asm_byte(__b1, __gu_addr, err); \ | 288 | __get_user_asm_byte(__b1, __gu_addr, err); \ |
288 | __get_user_asm_byte(__b2, __gu_addr + 1, err); \ | 289 | __get_user_asm_byte(__b2, __gu_addr + 1, err); \ |
289 | (x) = __b1 | (__b2 << 8); \ | 290 | (x) = __b1 | (__b2 << 8); \ |
290 | }) | 291 | }) |
291 | #else | 292 | #else |
292 | #define __get_user_asm_half(x,__gu_addr,err) \ | 293 | #define __get_user_asm_half(x,__gu_addr,err) \ |
293 | ({ \ | 294 | ({ \ |
294 | unsigned long __b1, __b2; \ | 295 | unsigned long __b1, __b2; \ |
295 | __get_user_asm_byte(__b1, __gu_addr, err); \ | 296 | __get_user_asm_byte(__b1, __gu_addr, err); \ |
296 | __get_user_asm_byte(__b2, __gu_addr + 1, err); \ | 297 | __get_user_asm_byte(__b2, __gu_addr + 1, err); \ |
297 | (x) = (__b1 << 8) | __b2; \ | 298 | (x) = (__b1 << 8) | __b2; \ |
298 | }) | 299 | }) |
299 | #endif | 300 | #endif |
300 | 301 | ||
301 | #define __get_user_asm_word(x,addr,err) \ | 302 | #define __get_user_asm_word(x,addr,err) \ |
302 | __asm__ __volatile__( \ | 303 | __asm__ __volatile__( \ |
303 | "1: " TUSER(ldr) " %1,[%2],#0\n" \ | 304 | "1: " TUSER(ldr) " %1,[%2],#0\n" \ |
304 | "2:\n" \ | 305 | "2:\n" \ |
305 | " .pushsection .fixup,\"ax\"\n" \ | 306 | " .pushsection .fixup,\"ax\"\n" \ |
306 | " .align 2\n" \ | 307 | " .align 2\n" \ |
307 | "3: mov %0, %3\n" \ | 308 | "3: mov %0, %3\n" \ |
308 | " mov %1, #0\n" \ | 309 | " mov %1, #0\n" \ |
309 | " b 2b\n" \ | 310 | " b 2b\n" \ |
310 | " .popsection\n" \ | 311 | " .popsection\n" \ |
311 | " .pushsection __ex_table,\"a\"\n" \ | 312 | " .pushsection __ex_table,\"a\"\n" \ |
312 | " .align 3\n" \ | 313 | " .align 3\n" \ |
313 | " .long 1b, 3b\n" \ | 314 | " .long 1b, 3b\n" \ |
314 | " .popsection" \ | 315 | " .popsection" \ |
315 | : "+r" (err), "=&r" (x) \ | 316 | : "+r" (err), "=&r" (x) \ |
316 | : "r" (addr), "i" (-EFAULT) \ | 317 | : "r" (addr), "i" (-EFAULT) \ |
317 | : "cc") | 318 | : "cc") |
318 | 319 | ||
319 | #define __put_user(x,ptr) \ | 320 | #define __put_user(x,ptr) \ |
320 | ({ \ | 321 | ({ \ |
321 | long __pu_err = 0; \ | 322 | long __pu_err = 0; \ |
322 | __put_user_err((x),(ptr),__pu_err); \ | 323 | __put_user_err((x),(ptr),__pu_err); \ |
323 | __pu_err; \ | 324 | __pu_err; \ |
324 | }) | 325 | }) |
325 | 326 | ||
326 | #define __put_user_error(x,ptr,err) \ | 327 | #define __put_user_error(x,ptr,err) \ |
327 | ({ \ | 328 | ({ \ |
328 | __put_user_err((x),(ptr),err); \ | 329 | __put_user_err((x),(ptr),err); \ |
329 | (void) 0; \ | 330 | (void) 0; \ |
330 | }) | 331 | }) |
331 | 332 | ||
332 | #define __put_user_err(x,ptr,err) \ | 333 | #define __put_user_err(x,ptr,err) \ |
333 | do { \ | 334 | do { \ |
334 | unsigned long __pu_addr = (unsigned long)(ptr); \ | 335 | unsigned long __pu_addr = (unsigned long)(ptr); \ |
335 | __typeof__(*(ptr)) __pu_val = (x); \ | 336 | __typeof__(*(ptr)) __pu_val = (x); \ |
336 | __chk_user_ptr(ptr); \ | 337 | __chk_user_ptr(ptr); \ |
337 | might_fault(); \ | 338 | might_fault(); \ |
338 | switch (sizeof(*(ptr))) { \ | 339 | switch (sizeof(*(ptr))) { \ |
339 | case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ | 340 | case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ |
340 | case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ | 341 | case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ |
341 | case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ | 342 | case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ |
342 | case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ | 343 | case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ |
343 | default: __put_user_bad(); \ | 344 | default: __put_user_bad(); \ |
344 | } \ | 345 | } \ |
345 | } while (0) | 346 | } while (0) |
346 | 347 | ||
347 | #define __put_user_asm_byte(x,__pu_addr,err) \ | 348 | #define __put_user_asm_byte(x,__pu_addr,err) \ |
348 | __asm__ __volatile__( \ | 349 | __asm__ __volatile__( \ |
349 | "1: " TUSER(strb) " %1,[%2],#0\n" \ | 350 | "1: " TUSER(strb) " %1,[%2],#0\n" \ |
350 | "2:\n" \ | 351 | "2:\n" \ |
351 | " .pushsection .fixup,\"ax\"\n" \ | 352 | " .pushsection .fixup,\"ax\"\n" \ |
352 | " .align 2\n" \ | 353 | " .align 2\n" \ |
353 | "3: mov %0, %3\n" \ | 354 | "3: mov %0, %3\n" \ |
354 | " b 2b\n" \ | 355 | " b 2b\n" \ |
355 | " .popsection\n" \ | 356 | " .popsection\n" \ |
356 | " .pushsection __ex_table,\"a\"\n" \ | 357 | " .pushsection __ex_table,\"a\"\n" \ |
357 | " .align 3\n" \ | 358 | " .align 3\n" \ |
358 | " .long 1b, 3b\n" \ | 359 | " .long 1b, 3b\n" \ |
359 | " .popsection" \ | 360 | " .popsection" \ |
360 | : "+r" (err) \ | 361 | : "+r" (err) \ |
361 | : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ | 362 | : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ |
362 | : "cc") | 363 | : "cc") |
363 | 364 | ||
364 | #ifndef __ARMEB__ | 365 | #ifndef __ARMEB__ |
365 | #define __put_user_asm_half(x,__pu_addr,err) \ | 366 | #define __put_user_asm_half(x,__pu_addr,err) \ |
366 | ({ \ | 367 | ({ \ |
367 | unsigned long __temp = (unsigned long)(x); \ | 368 | unsigned long __temp = (unsigned long)(x); \ |
368 | __put_user_asm_byte(__temp, __pu_addr, err); \ | 369 | __put_user_asm_byte(__temp, __pu_addr, err); \ |
369 | __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ | 370 | __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ |
370 | }) | 371 | }) |
371 | #else | 372 | #else |
372 | #define __put_user_asm_half(x,__pu_addr,err) \ | 373 | #define __put_user_asm_half(x,__pu_addr,err) \ |
373 | ({ \ | 374 | ({ \ |
374 | unsigned long __temp = (unsigned long)(x); \ | 375 | unsigned long __temp = (unsigned long)(x); \ |
375 | __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ | 376 | __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ |
376 | __put_user_asm_byte(__temp, __pu_addr + 1, err); \ | 377 | __put_user_asm_byte(__temp, __pu_addr + 1, err); \ |
377 | }) | 378 | }) |
378 | #endif | 379 | #endif |
379 | 380 | ||
380 | #define __put_user_asm_word(x,__pu_addr,err) \ | 381 | #define __put_user_asm_word(x,__pu_addr,err) \ |
381 | __asm__ __volatile__( \ | 382 | __asm__ __volatile__( \ |
382 | "1: " TUSER(str) " %1,[%2],#0\n" \ | 383 | "1: " TUSER(str) " %1,[%2],#0\n" \ |
383 | "2:\n" \ | 384 | "2:\n" \ |
384 | " .pushsection .fixup,\"ax\"\n" \ | 385 | " .pushsection .fixup,\"ax\"\n" \ |
385 | " .align 2\n" \ | 386 | " .align 2\n" \ |
386 | "3: mov %0, %3\n" \ | 387 | "3: mov %0, %3\n" \ |
387 | " b 2b\n" \ | 388 | " b 2b\n" \ |
388 | " .popsection\n" \ | 389 | " .popsection\n" \ |
389 | " .pushsection __ex_table,\"a\"\n" \ | 390 | " .pushsection __ex_table,\"a\"\n" \ |
390 | " .align 3\n" \ | 391 | " .align 3\n" \ |
391 | " .long 1b, 3b\n" \ | 392 | " .long 1b, 3b\n" \ |
392 | " .popsection" \ | 393 | " .popsection" \ |
393 | : "+r" (err) \ | 394 | : "+r" (err) \ |
394 | : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ | 395 | : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ |
395 | : "cc") | 396 | : "cc") |
396 | 397 | ||
397 | #ifndef __ARMEB__ | 398 | #ifndef __ARMEB__ |
398 | #define __reg_oper0 "%R2" | 399 | #define __reg_oper0 "%R2" |
399 | #define __reg_oper1 "%Q2" | 400 | #define __reg_oper1 "%Q2" |
400 | #else | 401 | #else |
401 | #define __reg_oper0 "%Q2" | 402 | #define __reg_oper0 "%Q2" |
402 | #define __reg_oper1 "%R2" | 403 | #define __reg_oper1 "%R2" |
403 | #endif | 404 | #endif |
404 | 405 | ||
405 | #define __put_user_asm_dword(x,__pu_addr,err) \ | 406 | #define __put_user_asm_dword(x,__pu_addr,err) \ |
406 | __asm__ __volatile__( \ | 407 | __asm__ __volatile__( \ |
407 | ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ | 408 | ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ |
408 | ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ | 409 | ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ |
409 | THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \ | 410 | THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \ |
410 | THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \ | 411 | THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \ |
411 | "3:\n" \ | 412 | "3:\n" \ |
412 | " .pushsection .fixup,\"ax\"\n" \ | 413 | " .pushsection .fixup,\"ax\"\n" \ |
413 | " .align 2\n" \ | 414 | " .align 2\n" \ |
414 | "4: mov %0, %3\n" \ | 415 | "4: mov %0, %3\n" \ |
415 | " b 3b\n" \ | 416 | " b 3b\n" \ |
416 | " .popsection\n" \ | 417 | " .popsection\n" \ |
417 | " .pushsection __ex_table,\"a\"\n" \ | 418 | " .pushsection __ex_table,\"a\"\n" \ |
418 | " .align 3\n" \ | 419 | " .align 3\n" \ |
419 | " .long 1b, 4b\n" \ | 420 | " .long 1b, 4b\n" \ |
420 | " .long 2b, 4b\n" \ | 421 | " .long 2b, 4b\n" \ |
421 | " .popsection" \ | 422 | " .popsection" \ |
422 | : "+r" (err), "+r" (__pu_addr) \ | 423 | : "+r" (err), "+r" (__pu_addr) \ |
423 | : "r" (x), "i" (-EFAULT) \ | 424 | : "r" (x), "i" (-EFAULT) \ |
424 | : "cc") | 425 | : "cc") |
425 | 426 | ||
426 | 427 | ||
427 | #ifdef CONFIG_MMU | 428 | #ifdef CONFIG_MMU |
428 | extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); | 429 | extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); |
429 | extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); | 430 | extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); |
430 | extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); | 431 | extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); |
431 | extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); | 432 | extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); |
432 | extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); | 433 | extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); |
433 | #else | 434 | #else |
434 | #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) | 435 | #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) |
435 | #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) | 436 | #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) |
436 | #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) | 437 | #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) |
437 | #endif | 438 | #endif |
438 | 439 | ||
439 | static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) | 440 | static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) |
440 | { | 441 | { |
441 | if (access_ok(VERIFY_READ, from, n)) | 442 | if (access_ok(VERIFY_READ, from, n)) |
442 | n = __copy_from_user(to, from, n); | 443 | n = __copy_from_user(to, from, n); |
443 | else /* security hole - plug it */ | 444 | else /* security hole - plug it */ |
444 | memset(to, 0, n); | 445 | memset(to, 0, n); |
445 | return n; | 446 | return n; |
446 | } | 447 | } |
447 | 448 | ||
448 | static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) | 449 | static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) |
449 | { | 450 | { |
450 | if (access_ok(VERIFY_WRITE, to, n)) | 451 | if (access_ok(VERIFY_WRITE, to, n)) |
451 | n = __copy_to_user(to, from, n); | 452 | n = __copy_to_user(to, from, n); |
452 | return n; | 453 | return n; |
453 | } | 454 | } |
454 | 455 | ||
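Note that copy_from_user() and copy_to_user() return the number of bytes that could not be copied, not an error code, so the idiomatic test is simply for non-zero. A usage sketch with a hypothetical request structure and handler:

    struct example_req { int a, b; };

    static long example_handler(void __user *argp)
    {
            struct example_req req;

            if (copy_from_user(&req, argp, sizeof(req)))
                    return -EFAULT;
            req.a += req.b;
            if (copy_to_user(argp, &req, sizeof(req)))
                    return -EFAULT;
            return 0;
    }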
455 | #define __copy_to_user_inatomic __copy_to_user | 456 | #define __copy_to_user_inatomic __copy_to_user |
456 | #define __copy_from_user_inatomic __copy_from_user | 457 | #define __copy_from_user_inatomic __copy_from_user |
457 | 458 | ||
458 | static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) | 459 | static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) |
459 | { | 460 | { |
460 | if (access_ok(VERIFY_WRITE, to, n)) | 461 | if (access_ok(VERIFY_WRITE, to, n)) |
461 | n = __clear_user(to, n); | 462 | n = __clear_user(to, n); |
462 | return n; | 463 | return n; |
463 | } | 464 | } |
464 | 465 | ||
465 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | 466 | extern long strncpy_from_user(char *dest, const char __user *src, long count); |
466 | 467 | ||
467 | extern __must_check long strlen_user(const char __user *str); | 468 | extern __must_check long strlen_user(const char __user *str); |
468 | extern __must_check long strnlen_user(const char __user *str, long n); | 469 | extern __must_check long strnlen_user(const char __user *str, long n); |
469 | 470 | ||
470 | #endif /* _ASMARM_UACCESS_H */ | 471 | #endif /* _ASMARM_UACCESS_H */ |
471 | 472 |
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/linkage.h> | 2 | #include <linux/linkage.h> |
3 | 3 | ||
4 | #include <asm/assembler.h> | 4 | #include <asm/assembler.h> |
5 | #include <asm/asm-offsets.h> | 5 | #include <asm/asm-offsets.h> |
6 | #include <asm/errno.h> | 6 | #include <asm/errno.h> |
7 | #include <asm/thread_info.h> | 7 | #include <asm/thread_info.h> |
8 | #include <asm/v7m.h> | 8 | #include <asm/v7m.h> |
9 | 9 | ||
10 | @ Bad Abort numbers | 10 | @ Bad Abort numbers |
11 | @ ----------------- | 11 | @ ----------------- |
12 | @ | 12 | @ |
13 | #define BAD_PREFETCH 0 | 13 | #define BAD_PREFETCH 0 |
14 | #define BAD_DATA 1 | 14 | #define BAD_DATA 1 |
15 | #define BAD_ADDREXCPTN 2 | 15 | #define BAD_ADDREXCPTN 2 |
16 | #define BAD_IRQ 3 | 16 | #define BAD_IRQ 3 |
17 | #define BAD_UNDEFINSTR 4 | 17 | #define BAD_UNDEFINSTR 4 |
18 | 18 | ||
19 | @ | 19 | @ |
20 | @ Most of the stack format comes from struct pt_regs, but with | 20 | @ Most of the stack format comes from struct pt_regs, but with |
21 | @ the addition of 8 bytes for storing syscall args 5 and 6. | 21 | @ the addition of 8 bytes for storing syscall args 5 and 6. |
22 | @ This _must_ remain a multiple of 8 for EABI. | 22 | @ This _must_ remain a multiple of 8 for EABI. |
23 | @ | 23 | @ |
24 | #define S_OFF 8 | 24 | #define S_OFF 8 |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * The SWI code relies on the fact that R0 is at the bottom of the stack | 27 | * The SWI code relies on the fact that R0 is at the bottom of the stack |
28 | * (due to slow/fast restore user regs). | 28 | * (due to slow/fast restore user regs). |
29 | */ | 29 | */ |
30 | #if S_R0 != 0 | 30 | #if S_R0 != 0 |
31 | #error "Please fix" | 31 | #error "Please fix" |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | .macro zero_fp | 34 | .macro zero_fp |
35 | #ifdef CONFIG_FRAME_POINTER | 35 | #ifdef CONFIG_FRAME_POINTER |
36 | mov fp, #0 | 36 | mov fp, #0 |
37 | #endif | 37 | #endif |
38 | .endm | 38 | .endm |
39 | 39 | ||
40 | .macro alignment_trap, rtemp | 40 | .macro alignment_trap, rtemp |
41 | #ifdef CONFIG_ALIGNMENT_TRAP | 41 | #ifdef CONFIG_ALIGNMENT_TRAP |
42 | ldr \rtemp, .LCcralign | 42 | ldr \rtemp, .LCcralign |
43 | ldr \rtemp, [\rtemp] | 43 | ldr \rtemp, [\rtemp] |
44 | mcr p15, 0, \rtemp, c1, c0 | 44 | mcr p15, 0, \rtemp, c1, c0 |
45 | #endif | 45 | #endif |
46 | .endm | 46 | .endm |
47 | 47 | ||
48 | #ifdef CONFIG_CPU_V7M | 48 | #ifdef CONFIG_CPU_V7M |
49 | /* | 49 | /* |
50 | * ARMv7-M exception entry/exit macros. | 50 | * ARMv7-M exception entry/exit macros. |
51 | * | 51 | * |
52 | * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are | 52 | * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are |
53 | * automatically saved on the current stack (32 bytes) before | 53 | * automatically saved on the current stack (32 bytes) before |
54 | * switching to the exception stack (SP_main). | 54 | * switching to the exception stack (SP_main). |
55 | * | 55 | * |
56 | * If the exception is taken while in user mode, SP_main is | 56 | * If the exception is taken while in user mode, SP_main is |
57 | * empty. Otherwise, SP_main is aligned to 64 bit automatically | 57 | * empty. Otherwise, SP_main is aligned to 64 bit automatically |
58 | * (CCR.STKALIGN set). | 58 | * (CCR.STKALIGN set). |
59 | * | 59 | * |
60 | * Linux assumes that the interrupts are disabled when entering an | 60 | * Linux assumes that the interrupts are disabled when entering an |
61 | * exception handler and it may BUG if this is not the case. Interrupts | 61 | * exception handler and it may BUG if this is not the case. Interrupts |
62 | * are disabled during entry and reenabled in the exit macro. | 62 | * are disabled during entry and reenabled in the exit macro. |
63 | * | 63 | * |
64 | * v7m_exception_slow_exit is used when returning from SVC or PendSV. | 64 | * v7m_exception_slow_exit is used when returning from SVC or PendSV. |
65 | * When returning to kernel mode, we don't return from exception. | 65 | * When returning to kernel mode, we don't return from exception. |
66 | */ | 66 | */ |
67 | .macro v7m_exception_entry | 67 | .macro v7m_exception_entry |
68 | @ determine the location of the registers saved by the core during | 68 | @ determine the location of the registers saved by the core during |
69 | @ exception entry. Depending on the mode the cpu was in when the | 69 | @ exception entry. Depending on the mode the cpu was in when the |
70 | @ exception happened, that is either on the main or the process stack. | 70 | @ exception happened, that is either on the main or the process stack. |
71 | @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack | 71 | @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack |
72 | @ was used. | 72 | @ was used. |
73 | tst lr, #EXC_RET_STACK_MASK | 73 | tst lr, #EXC_RET_STACK_MASK |
74 | mrsne r12, psp | 74 | mrsne r12, psp |
75 | moveq r12, sp | 75 | moveq r12, sp |
76 | 76 | ||
77 | @ we cannot rely on r0-r3 and r12 matching the value saved in the | 77 | @ we cannot rely on r0-r3 and r12 matching the value saved in the |
78 | @ exception frame because of tail-chaining. So these have to be | 78 | @ exception frame because of tail-chaining. So these have to be |
79 | @ reloaded. | 79 | @ reloaded. |
80 | ldmia r12!, {r0-r3} | 80 | ldmia r12!, {r0-r3} |
81 | 81 | ||
82 | @ Linux expects to have irqs off. Do it here before taking stack space | 82 | @ Linux expects to have irqs off. Do it here before taking stack space |
83 | cpsid i | 83 | cpsid i |
84 | 84 | ||
85 | sub sp, #S_FRAME_SIZE-S_IP | 85 | sub sp, #S_FRAME_SIZE-S_IP |
86 | stmdb sp!, {r0-r11} | 86 | stmdb sp!, {r0-r11} |
87 | 87 | ||
88 | @ load saved r12, lr, return address and xPSR. | 88 | @ load saved r12, lr, return address and xPSR. |
89 | @ r0-r7 are used for signals and never touched from now on. Clobbering | 89 | @ r0-r7 are used for signals and never touched from now on. Clobbering |
90 | @ r8-r12 is OK. | 90 | @ r8-r12 is OK. |
91 | mov r9, r12 | 91 | mov r9, r12 |
92 | ldmia r9!, {r8, r10-r12} | 92 | ldmia r9!, {r8, r10-r12} |
93 | 93 | ||
94 | @ calculate the original stack pointer value. | 94 | @ calculate the original stack pointer value. |
95 | @ r9 currently points to the memory location just above the auto saved | 95 | @ r9 currently points to the memory location just above the auto saved |
96 | @ xPSR. | 96 | @ xPSR. |
97 | @ The cpu might automatically 8-byte align the stack. Bit 9 | 97 | @ The cpu might automatically 8-byte align the stack. Bit 9 |
98 | @ of the saved xPSR specifies whether stack alignment took place. In that | 98 | @ of the saved xPSR specifies whether stack alignment took place. In that |
99 | @ case an extra 32-bit padding word is included on the stack. | 99 | @ case an extra 32-bit padding word is included on the stack. |
100 | 100 | ||
101 | tst r12, V7M_xPSR_FRAMEPTRALIGN | 101 | tst r12, V7M_xPSR_FRAMEPTRALIGN |
102 | addne r9, r9, #4 | 102 | addne r9, r9, #4 |
103 | 103 | ||
104 | @ store saved r12 using str to have a register to hold the base for stm | 104 | @ store saved r12 using str to have a register to hold the base for stm |
105 | str r8, [sp, #S_IP] | 105 | str r8, [sp, #S_IP] |
106 | add r8, sp, #S_SP | 106 | add r8, sp, #S_SP |
107 | @ store r13-r15, xPSR | 107 | @ store r13-r15, xPSR |
108 | stmia r8!, {r9-r12} | 108 | stmia r8!, {r9-r12} |
109 | @ store old_r0 | 109 | @ store old_r0 |
110 | str r0, [r8] | 110 | str r0, [r8] |
111 | .endm | 111 | .endm |
112 | 112 | ||
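The stack selection at the top of v7m_exception_entry hinges on bit 2 of EXC_RETURN (the value the core loads into lr on exception entry), which records whether the frame was stacked on the process stack (PSP) or the main stack (MSP). The same decision in C, as a sketch (the function name and the _SK constant are stand-ins; the asm uses EXC_RET_STACK_MASK):

    #define EXC_RET_STACK_MASK_SK   0x4     /* bit 2 of EXC_RETURN */

    static inline unsigned long exc_frame_base(unsigned long exc_return,
                                               unsigned long msp,
                                               unsigned long psp)
    {
            /* bit 2 set: frame was pushed to the process stack */
            return (exc_return & EXC_RET_STACK_MASK_SK) ? psp : msp;
    }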
113 | /* | 113 | /* |
114 | * PENDSV and SVCALL are configured to have the same exception | 114 | * PENDSV and SVCALL are configured to have the same exception |
115 | * priorities. As a kernel thread runs at SVCALL execution priority it | 115 | * priorities. As a kernel thread runs at SVCALL execution priority it |
116 | * can never be preempted and so we will never have to return to a | 116 | * can never be preempted and so we will never have to return to a |
117 | * kernel thread here. | 117 | * kernel thread here. |
118 | */ | 118 | */ |
119 | .macro v7m_exception_slow_exit ret_r0 | 119 | .macro v7m_exception_slow_exit ret_r0 |
120 | cpsid i | 120 | cpsid i |
121 | ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK | 121 | ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK |
122 | 122 | ||
123 | @ read original r12, sp, lr, pc and xPSR | 123 | @ read original r12, sp, lr, pc and xPSR |
124 | add r12, sp, #S_IP | 124 | add r12, sp, #S_IP |
125 | ldmia r12, {r1-r5} | 125 | ldmia r12, {r1-r5} |
126 | 126 | ||
127 | @ an exception frame is always 8-byte aligned. To tell the hardware | 127 | @ an exception frame is always 8-byte aligned. To tell the hardware |
128 | @ whether the sp to be restored is aligned, set bit 9 of the saved | 128 | @ whether the sp to be restored is aligned, set bit 9 of the saved |
129 | @ xPSR accordingly. | 129 | @ xPSR accordingly. |
130 | tst r2, #4 | 130 | tst r2, #4 |
131 | subne r2, r2, #4 | 131 | subne r2, r2, #4 |
132 | orrne r5, V7M_xPSR_FRAMEPTRALIGN | 132 | orrne r5, V7M_xPSR_FRAMEPTRALIGN |
133 | biceq r5, V7M_xPSR_FRAMEPTRALIGN | 133 | biceq r5, V7M_xPSR_FRAMEPTRALIGN |
134 | 134 | ||
135 | @ ensure bit 0 is cleared in the PC, otherwise behaviour is | ||
136 | @ unpredictable | ||
137 | bic r4, #1 | ||
138 | |||
135 | @ write basic exception frame | 139 | @ write basic exception frame |
136 | stmdb r2!, {r1, r3-r5} | 140 | stmdb r2!, {r1, r3-r5} |
137 | ldmia sp, {r1, r3-r5} | 141 | ldmia sp, {r1, r3-r5} |
138 | .if \ret_r0 | 142 | .if \ret_r0 |
139 | stmdb r2!, {r0, r3-r5} | 143 | stmdb r2!, {r0, r3-r5} |
140 | .else | 144 | .else |
141 | stmdb r2!, {r1, r3-r5} | 145 | stmdb r2!, {r1, r3-r5} |
142 | .endif | 146 | .endif |
143 | 147 | ||
144 | @ restore process sp | 148 | @ restore process sp |
145 | msr psp, r2 | 149 | msr psp, r2 |
146 | 150 | ||
147 | @ restore original r4-r11 | 151 | @ restore original r4-r11 |
148 | ldmia sp!, {r0-r11} | 152 | ldmia sp!, {r0-r11} |
149 | 153 | ||
150 | @ restore main sp | 154 | @ restore main sp |
151 | add sp, sp, #S_FRAME_SIZE-S_IP | 155 | add sp, sp, #S_FRAME_SIZE-S_IP |
152 | 156 | ||
153 | cpsie i | 157 | cpsie i |
154 | bx lr | 158 | bx lr |
155 | .endm | 159 | .endm |
156 | #endif /* CONFIG_CPU_V7M */ | 160 | #endif /* CONFIG_CPU_V7M */ |
157 | 161 | ||
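Two details of v7m_exception_slow_exit are worth spelling out. The added "bic r4, #1" clears bit 0 of the return address written into the frame, since on v7-M the Thumb state is taken from EPSR.T and a set bit 0 in the stacked PC is unpredictable; and bit 9 of the restored xPSR tells the core whether to re-insert a padding word when it pops the frame. The same fixups in C, as a sketch (struct and names are hypothetical; the real mask is V7M_xPSR_FRAMEPTRALIGN):

    #define XPSR_FRAMEPTRALIGN_SK   (1UL << 9)      /* stand-in for bit 9 */

    struct v7m_return_frame { unsigned long pc, xpsr; };

    static void fix_return_frame(struct v7m_return_frame *f, int sp_was_padded)
    {
            f->pc &= ~1UL;                  /* bit 0 must be clear */
            if (sp_was_padded)
                    f->xpsr |= XPSR_FRAMEPTRALIGN_SK;
            else
                    f->xpsr &= ~XPSR_FRAMEPTRALIGN_SK;
    }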
158 | @ | 162 | @ |
159 | @ Store/load the USER SP and LR registers by switching to the SYS | 163 | @ Store/load the USER SP and LR registers by switching to the SYS |
160 | @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not | 164 | @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not |
161 | @ available. Should only be called from SVC mode | 165 | @ available. Should only be called from SVC mode |
162 | @ | 166 | @ |
163 | .macro store_user_sp_lr, rd, rtemp, offset = 0 | 167 | .macro store_user_sp_lr, rd, rtemp, offset = 0 |
164 | mrs \rtemp, cpsr | 168 | mrs \rtemp, cpsr |
165 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) | 169 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) |
166 | msr cpsr_c, \rtemp @ switch to the SYS mode | 170 | msr cpsr_c, \rtemp @ switch to the SYS mode |
167 | 171 | ||
168 | str sp, [\rd, #\offset] @ save sp_usr | 172 | str sp, [\rd, #\offset] @ save sp_usr |
169 | str lr, [\rd, #\offset + 4] @ save lr_usr | 173 | str lr, [\rd, #\offset + 4] @ save lr_usr |
170 | 174 | ||
171 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) | 175 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) |
172 | msr cpsr_c, \rtemp @ switch back to the SVC mode | 176 | msr cpsr_c, \rtemp @ switch back to the SVC mode |
173 | .endm | 177 | .endm |
174 | 178 | ||
175 | .macro load_user_sp_lr, rd, rtemp, offset = 0 | 179 | .macro load_user_sp_lr, rd, rtemp, offset = 0 |
176 | mrs \rtemp, cpsr | 180 | mrs \rtemp, cpsr |
177 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) | 181 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) |
178 | msr cpsr_c, \rtemp @ switch to the SYS mode | 182 | msr cpsr_c, \rtemp @ switch to the SYS mode |
179 | 183 | ||
180 | ldr sp, [\rd, #\offset] @ load sp_usr | 184 | ldr sp, [\rd, #\offset] @ load sp_usr |
181 | ldr lr, [\rd, #\offset + 4] @ load lr_usr | 185 | ldr lr, [\rd, #\offset + 4] @ load lr_usr |
182 | 186 | ||
183 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) | 187 | eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) |
184 | msr cpsr_c, \rtemp @ switch back to the SVC mode | 188 | msr cpsr_c, \rtemp @ switch back to the SVC mode |
185 | .endm | 189 | .endm |
186 | 190 | ||
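The paired EOR/MSR sequences in store_user_sp_lr and load_user_sp_lr rely on XOR being a self-inverse toggle: flipping the CPSR mode field with (SVC_MODE ^ SYSTEM_MODE) switches SVC to SYS, and applying the same operation again switches back. A self-contained sketch (0x13 and 0x1f are the architectural SVC and SYS mode encodings; the _SK names are stand-ins):

    #define SVC_MODE_SK     0x13
    #define SYSTEM_MODE_SK  0x1f

    static inline unsigned int toggle_svc_sys(unsigned int cpsr)
    {
            /* applying this twice returns the original cpsr */
            return cpsr ^ (SVC_MODE_SK ^ SYSTEM_MODE_SK);
    }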
187 | #ifndef CONFIG_THUMB2_KERNEL | 191 | #ifndef CONFIG_THUMB2_KERNEL |
188 | .macro svc_exit, rpsr, irq = 0 | 192 | .macro svc_exit, rpsr, irq = 0 |
189 | .if \irq != 0 | 193 | .if \irq != 0 |
190 | @ IRQs already off | 194 | @ IRQs already off |
191 | #ifdef CONFIG_TRACE_IRQFLAGS | 195 | #ifdef CONFIG_TRACE_IRQFLAGS |
192 | @ The parent context IRQs must have been enabled to get here in | 196 | @ The parent context IRQs must have been enabled to get here in |
193 | @ the first place, so there's no point checking the PSR I bit. | 197 | @ the first place, so there's no point checking the PSR I bit. |
194 | bl trace_hardirqs_on | 198 | bl trace_hardirqs_on |
195 | #endif | 199 | #endif |
196 | .else | 200 | .else |
197 | @ IRQs off again before pulling preserved data off the stack | 201 | @ IRQs off again before pulling preserved data off the stack |
198 | disable_irq_notrace | 202 | disable_irq_notrace |
199 | #ifdef CONFIG_TRACE_IRQFLAGS | 203 | #ifdef CONFIG_TRACE_IRQFLAGS |
200 | tst \rpsr, #PSR_I_BIT | 204 | tst \rpsr, #PSR_I_BIT |
201 | bleq trace_hardirqs_on | 205 | bleq trace_hardirqs_on |
202 | tst \rpsr, #PSR_I_BIT | 206 | tst \rpsr, #PSR_I_BIT |
203 | blne trace_hardirqs_off | 207 | blne trace_hardirqs_off |
204 | #endif | 208 | #endif |
205 | .endif | 209 | .endif |
206 | msr spsr_cxsf, \rpsr | 210 | msr spsr_cxsf, \rpsr |
207 | #if defined(CONFIG_CPU_V6) | 211 | #if defined(CONFIG_CPU_V6) |
208 | ldr r0, [sp] | 212 | ldr r0, [sp] |
209 | strex r1, r2, [sp] @ clear the exclusive monitor | 213 | strex r1, r2, [sp] @ clear the exclusive monitor |
210 | ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr | 214 | ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr |
211 | #elif defined(CONFIG_CPU_32v6K) | 215 | #elif defined(CONFIG_CPU_32v6K) |
212 | clrex @ clear the exclusive monitor | 216 | clrex @ clear the exclusive monitor |
213 | ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr | 217 | ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr |
214 | #else | 218 | #else |
215 | ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr | 219 | ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr |
216 | #endif | 220 | #endif |
217 | .endm | 221 | .endm |
218 | 222 | ||
219 | .macro restore_user_regs, fast = 0, offset = 0 | 223 | .macro restore_user_regs, fast = 0, offset = 0 |
220 | ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr | 224 | ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr |
221 | ldr lr, [sp, #\offset + S_PC]! @ get pc | 225 | ldr lr, [sp, #\offset + S_PC]! @ get pc |
222 | msr spsr_cxsf, r1 @ save in spsr_svc | 226 | msr spsr_cxsf, r1 @ save in spsr_svc |
223 | #if defined(CONFIG_CPU_V6) | 227 | #if defined(CONFIG_CPU_V6) |
224 | strex r1, r2, [sp] @ clear the exclusive monitor | 228 | strex r1, r2, [sp] @ clear the exclusive monitor |
225 | #elif defined(CONFIG_CPU_32v6K) | 229 | #elif defined(CONFIG_CPU_32v6K) |
226 | clrex @ clear the exclusive monitor | 230 | clrex @ clear the exclusive monitor |
227 | #endif | 231 | #endif |
228 | .if \fast | 232 | .if \fast |
229 | ldmdb sp, {r1 - lr}^ @ get calling r1 - lr | 233 | ldmdb sp, {r1 - lr}^ @ get calling r1 - lr |
230 | .else | 234 | .else |
231 | ldmdb sp, {r0 - lr}^ @ get calling r0 - lr | 235 | ldmdb sp, {r0 - lr}^ @ get calling r0 - lr |
232 | .endif | 236 | .endif |
233 | mov r0, r0 @ ARMv5T and earlier require a nop | 237 | mov r0, r0 @ ARMv5T and earlier require a nop |
234 | @ after ldm {}^ | 238 | @ after ldm {}^ |
235 | add sp, sp, #S_FRAME_SIZE - S_PC | 239 | add sp, sp, #S_FRAME_SIZE - S_PC |
236 | movs pc, lr @ return & move spsr_svc into cpsr | 240 | movs pc, lr @ return & move spsr_svc into cpsr |
237 | .endm | 241 | .endm |
238 | 242 | ||
239 | @ | 243 | @ |
240 | @ 32-bit wide "mov pc, reg" | 244 | @ 32-bit wide "mov pc, reg" |
241 | @ | 245 | @ |
242 | .macro movw_pc, reg | 246 | .macro movw_pc, reg |
243 | mov pc, \reg | 247 | mov pc, \reg |
244 | .endm | 248 | .endm |
245 | #else /* CONFIG_THUMB2_KERNEL */ | 249 | #else /* CONFIG_THUMB2_KERNEL */ |
246 | .macro svc_exit, rpsr, irq = 0 | 250 | .macro svc_exit, rpsr, irq = 0 |
247 | .if \irq != 0 | 251 | .if \irq != 0 |
248 | @ IRQs already off | 252 | @ IRQs already off |
249 | #ifdef CONFIG_TRACE_IRQFLAGS | 253 | #ifdef CONFIG_TRACE_IRQFLAGS |
250 | @ The parent context IRQs must have been enabled to get here in | 254 | @ The parent context IRQs must have been enabled to get here in |
251 | @ the first place, so there's no point checking the PSR I bit. | 255 | @ the first place, so there's no point checking the PSR I bit. |
252 | bl trace_hardirqs_on | 256 | bl trace_hardirqs_on |
253 | #endif | 257 | #endif |
254 | .else | 258 | .else |
255 | @ IRQs off again before pulling preserved data off the stack | 259 | @ IRQs off again before pulling preserved data off the stack |
256 | disable_irq_notrace | 260 | disable_irq_notrace |
257 | #ifdef CONFIG_TRACE_IRQFLAGS | 261 | #ifdef CONFIG_TRACE_IRQFLAGS |
258 | tst \rpsr, #PSR_I_BIT | 262 | tst \rpsr, #PSR_I_BIT |
259 | bleq trace_hardirqs_on | 263 | bleq trace_hardirqs_on |
260 | tst \rpsr, #PSR_I_BIT | 264 | tst \rpsr, #PSR_I_BIT |
261 | blne trace_hardirqs_off | 265 | blne trace_hardirqs_off |
262 | #endif | 266 | #endif |
263 | .endif | 267 | .endif |
264 | ldr lr, [sp, #S_SP] @ top of the stack | 268 | ldr lr, [sp, #S_SP] @ top of the stack |
265 | ldrd r0, r1, [sp, #S_LR] @ calling lr and pc | 269 | ldrd r0, r1, [sp, #S_LR] @ calling lr and pc |
266 | clrex @ clear the exclusive monitor | 270 | clrex @ clear the exclusive monitor |
267 | stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context | 271 | stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context |
268 | ldmia sp, {r0 - r12} | 272 | ldmia sp, {r0 - r12} |
269 | mov sp, lr | 273 | mov sp, lr |
270 | ldr lr, [sp], #4 | 274 | ldr lr, [sp], #4 |
271 | rfeia sp! | 275 | rfeia sp! |
272 | .endm | 276 | .endm |
273 | 277 | ||
274 | #ifdef CONFIG_CPU_V7M | 278 | #ifdef CONFIG_CPU_V7M |
275 | /* | 279 | /* |
276 | * Note we don't need to do clrex here as clearing the local monitor is | 280 | * Note we don't need to do clrex here as clearing the local monitor is |
277 | * part of each exception entry and exit sequence. | 281 | * part of each exception entry and exit sequence. |
278 | */ | 282 | */ |
279 | .macro restore_user_regs, fast = 0, offset = 0 | 283 | .macro restore_user_regs, fast = 0, offset = 0 |
280 | .if \offset | 284 | .if \offset |
281 | add sp, #\offset | 285 | add sp, #\offset |
282 | .endif | 286 | .endif |
283 | v7m_exception_slow_exit ret_r0 = \fast | 287 | v7m_exception_slow_exit ret_r0 = \fast |
284 | .endm | 288 | .endm |
285 | #else /* ifdef CONFIG_CPU_V7M */ | 289 | #else /* ifdef CONFIG_CPU_V7M */ |
286 | .macro restore_user_regs, fast = 0, offset = 0 | 290 | .macro restore_user_regs, fast = 0, offset = 0 |
287 | clrex @ clear the exclusive monitor | 291 | clrex @ clear the exclusive monitor |
288 | mov r2, sp | 292 | mov r2, sp |
289 | load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr | 293 | load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr |
290 | ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr | 294 | ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr |
291 | ldr lr, [sp, #\offset + S_PC] @ get pc | 295 | ldr lr, [sp, #\offset + S_PC] @ get pc |
292 | add sp, sp, #\offset + S_SP | 296 | add sp, sp, #\offset + S_SP |
293 | msr spsr_cxsf, r1 @ save in spsr_svc | 297 | msr spsr_cxsf, r1 @ save in spsr_svc |
294 | .if \fast | 298 | .if \fast |
295 | ldmdb sp, {r1 - r12} @ get calling r1 - r12 | 299 | ldmdb sp, {r1 - r12} @ get calling r1 - r12 |
296 | .else | 300 | .else |
297 | ldmdb sp, {r0 - r12} @ get calling r0 - r12 | 301 | ldmdb sp, {r0 - r12} @ get calling r0 - r12 |
298 | .endif | 302 | .endif |
299 | add sp, sp, #S_FRAME_SIZE - S_SP | 303 | add sp, sp, #S_FRAME_SIZE - S_SP |
300 | movs pc, lr @ return & move spsr_svc into cpsr | 304 | movs pc, lr @ return & move spsr_svc into cpsr |
301 | .endm | 305 | .endm |
302 | #endif /* ifdef CONFIG_CPU_V7M / else */ | 306 | #endif /* ifdef CONFIG_CPU_V7M / else */ |
303 | 307 | ||
304 | @ | 308 | @ |
305 | @ 32-bit wide "mov pc, reg" | 309 | @ 32-bit wide "mov pc, reg" |
306 | @ | 310 | @ |
307 | .macro movw_pc, reg | 311 | .macro movw_pc, reg |
308 | mov pc, \reg | 312 | mov pc, \reg |
309 | nop | 313 | nop |
310 | .endm | 314 | .endm |
311 | #endif /* !CONFIG_THUMB2_KERNEL */ | 315 | #endif /* !CONFIG_THUMB2_KERNEL */ |
312 | 316 | ||
313 | /* | 317 | /* |
314 | * Context tracking subsystem. Used to instrument transitions | 318 | * Context tracking subsystem. Used to instrument transitions |
315 | * between user and kernel mode. | 319 | * between user and kernel mode. |
316 | */ | 320 | */ |
317 | .macro ct_user_exit, save = 1 | 321 | .macro ct_user_exit, save = 1 |
318 | #ifdef CONFIG_CONTEXT_TRACKING | 322 | #ifdef CONFIG_CONTEXT_TRACKING |
319 | .if \save | 323 | .if \save |
320 | stmdb sp!, {r0-r3, ip, lr} | 324 | stmdb sp!, {r0-r3, ip, lr} |
321 | bl context_tracking_user_exit | 325 | bl context_tracking_user_exit |
322 | ldmia sp!, {r0-r3, ip, lr} | 326 | ldmia sp!, {r0-r3, ip, lr} |
323 | .else | 327 | .else |
324 | bl context_tracking_user_exit | 328 | bl context_tracking_user_exit |
325 | .endif | 329 | .endif |
326 | #endif | 330 | #endif |
327 | .endm | 331 | .endm |
328 | 332 | ||
329 | .macro ct_user_enter, save = 1 | 333 | .macro ct_user_enter, save = 1 |
330 | #ifdef CONFIG_CONTEXT_TRACKING | 334 | #ifdef CONFIG_CONTEXT_TRACKING |
331 | .if \save | 335 | .if \save |
332 | stmdb sp!, {r0-r3, ip, lr} | 336 | stmdb sp!, {r0-r3, ip, lr} |
333 | bl context_tracking_user_enter | 337 | bl context_tracking_user_enter |
334 | ldmia sp!, {r0-r3, ip, lr} | 338 | ldmia sp!, {r0-r3, ip, lr} |
335 | .else | 339 | .else |
336 | bl context_tracking_user_enter | 340 | bl context_tracking_user_enter |
337 | .endif | 341 | .endif |
338 | #endif | 342 | #endif |
339 | .endm | 343 | .endm |
340 | 344 | ||
341 | /* | 345 | /* |
342 | * These are the registers used in the syscall handler, and allow us to | 346 | * These are the registers used in the syscall handler, and allow us to |
343 | * have in theory up to 7 arguments to a function - r0 to r6. | 347 | * have in theory up to 7 arguments to a function - r0 to r6. |
344 | * | 348 | * |
345 | * r7 is reserved for the system call number for thumb mode. | 349 | * r7 is reserved for the system call number for thumb mode. |
346 | * | 350 | * |
347 | * Note that tbl == why is intentional. | 351 | * Note that tbl == why is intentional. |
348 | * | 352 | * |
349 | * We must set at least "tsk" and "why" when calling ret_with_reschedule. | 353 | * We must set at least "tsk" and "why" when calling ret_with_reschedule. |
350 | */ | 354 | */ |
351 | scno .req r7 @ syscall number | 355 | scno .req r7 @ syscall number |
352 | tbl .req r8 @ syscall table pointer | 356 | tbl .req r8 @ syscall table pointer |
353 | why .req r8 @ Linux syscall (!= 0) | 357 | why .req r8 @ Linux syscall (!= 0) |
354 | tsk .req r9 @ current thread_info | 358 | tsk .req r9 @ current thread_info |
355 | 359 |
1 | /* | 1 | /* |
2 | * arch/arm/kernel/unwind.c | 2 | * arch/arm/kernel/unwind.c |
3 | * | 3 | * |
4 | * Copyright (C) 2008 ARM Limited | 4 | * Copyright (C) 2008 ARM Limited |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | * | 18 | * |
19 | * | 19 | * |
20 | * Stack unwinding support for ARM | 20 | * Stack unwinding support for ARM |
21 | * | 21 | * |
22 | * An ARM EABI version of gcc is required to generate the unwind | 22 | * An ARM EABI version of gcc is required to generate the unwind |
23 | * tables. For information about the structure of the unwind tables, | 23 | * tables. For information about the structure of the unwind tables, |
24 | * see "Exception Handling ABI for the ARM Architecture" at: | 24 | * see "Exception Handling ABI for the ARM Architecture" at: |
25 | * | 25 | * |
26 | * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html | 26 | * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #ifndef __CHECKER__ | 29 | #ifndef __CHECKER__ |
30 | #if !defined (__ARM_EABI__) | 30 | #if !defined (__ARM_EABI__) |
31 | #warning Your compiler does not have EABI support. | 31 | #warning Your compiler does not have EABI support. |
32 | #warning ARM unwind is known to compile only with EABI compilers. | 32 | #warning ARM unwind is known to compile only with EABI compilers. |
33 | #warning Change compiler or disable ARM_UNWIND option. | 33 | #warning Change compiler or disable ARM_UNWIND option. |
34 | #elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2) | 34 | #elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2) |
35 | #warning Your compiler is too buggy; it is known to not compile ARM unwind support. | 35 | #warning Your compiler is too buggy; it is known to not compile ARM unwind support. |
36 | #warning Change compiler or disable ARM_UNWIND option. | 36 | #warning Change compiler or disable ARM_UNWIND option. |
37 | #endif | 37 | #endif |
38 | #endif /* __CHECKER__ */ | 38 | #endif /* __CHECKER__ */ |
39 | 39 | ||
40 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
41 | #include <linux/init.h> | 41 | #include <linux/init.h> |
42 | #include <linux/export.h> | 42 | #include <linux/export.h> |
43 | #include <linux/sched.h> | 43 | #include <linux/sched.h> |
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/spinlock.h> | 45 | #include <linux/spinlock.h> |
46 | #include <linux/list.h> | 46 | #include <linux/list.h> |
47 | 47 | ||
48 | #include <asm/stacktrace.h> | 48 | #include <asm/stacktrace.h> |
49 | #include <asm/traps.h> | 49 | #include <asm/traps.h> |
50 | #include <asm/unwind.h> | 50 | #include <asm/unwind.h> |
51 | 51 | ||
52 | /* Dummy functions to avoid linker complaints */ | 52 | /* Dummy functions to avoid linker complaints */ |
53 | void __aeabi_unwind_cpp_pr0(void) | 53 | void __aeabi_unwind_cpp_pr0(void) |
54 | { | 54 | { |
55 | }; | 55 | }; |
56 | EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0); | 56 | EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0); |
57 | 57 | ||
58 | void __aeabi_unwind_cpp_pr1(void) | 58 | void __aeabi_unwind_cpp_pr1(void) |
59 | { | 59 | { |
60 | }; | 60 | }; |
61 | EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1); | 61 | EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1); |
62 | 62 | ||
63 | void __aeabi_unwind_cpp_pr2(void) | 63 | void __aeabi_unwind_cpp_pr2(void) |
64 | { | 64 | { |
65 | }; | 65 | }; |
66 | EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2); | 66 | EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2); |
67 | 67 | ||
68 | struct unwind_ctrl_block { | 68 | struct unwind_ctrl_block { |
69 | unsigned long vrs[16]; /* virtual register set */ | 69 | unsigned long vrs[16]; /* virtual register set */ |
70 | const unsigned long *insn; /* pointer to the current instruction word */ | 70 | const unsigned long *insn; /* pointer to the current instruction word */ |
71 | unsigned long sp_high; /* highest value of sp allowed */ | 71 | unsigned long sp_high; /* highest value of sp allowed */ |
72 | /* | 72 | /* |
73 | * 1 : check for stack overflow for each register pop. | 73 | * 1 : check for stack overflow for each register pop. |
74 | * 0 : save overhead if there is plenty of stack remaining. | 74 | * 0 : save overhead if there is plenty of stack remaining. |
75 | */ | 75 | */ |
76 | int check_each_pop; | 76 | int check_each_pop; |
77 | int entries; /* number of entries left to interpret */ | 77 | int entries; /* number of entries left to interpret */ |
78 | int byte; /* current byte number in the instruction word */ | 78 | int byte; /* current byte number in the instruction word */ |
79 | }; | 79 | }; |
80 | 80 | ||
81 | enum regs { | 81 | enum regs { |
82 | #ifdef CONFIG_THUMB2_KERNEL | 82 | #ifdef CONFIG_THUMB2_KERNEL |
83 | FP = 7, | 83 | FP = 7, |
84 | #else | 84 | #else |
85 | FP = 11, | 85 | FP = 11, |
86 | #endif | 86 | #endif |
87 | SP = 13, | 87 | SP = 13, |
88 | LR = 14, | 88 | LR = 14, |
89 | PC = 15 | 89 | PC = 15 |
90 | }; | 90 | }; |
91 | 91 | ||
92 | extern const struct unwind_idx __start_unwind_idx[]; | 92 | extern const struct unwind_idx __start_unwind_idx[]; |
93 | static const struct unwind_idx *__origin_unwind_idx; | 93 | static const struct unwind_idx *__origin_unwind_idx; |
94 | extern const struct unwind_idx __stop_unwind_idx[]; | 94 | extern const struct unwind_idx __stop_unwind_idx[]; |
95 | 95 | ||
96 | static DEFINE_SPINLOCK(unwind_lock); | 96 | static DEFINE_SPINLOCK(unwind_lock); |
97 | static LIST_HEAD(unwind_tables); | 97 | static LIST_HEAD(unwind_tables); |
98 | 98 | ||
99 | /* Convert a prel31 symbol to an absolute address */ | 99 | /* Convert a prel31 symbol to an absolute address */ |
100 | #define prel31_to_addr(ptr) \ | 100 | #define prel31_to_addr(ptr) \ |
101 | ({ \ | 101 | ({ \ |
102 | /* sign-extend to 32 bits */ \ | 102 | /* sign-extend to 32 bits */ \ |
103 | long offset = (((long)*(ptr)) << 1) >> 1; \ | 103 | long offset = (((long)*(ptr)) << 1) >> 1; \ |
104 | (unsigned long)(ptr) + offset; \ | 104 | (unsigned long)(ptr) + offset; \ |
105 | }) | 105 | }) |
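
The prel31_to_addr() macro above decodes a "prel31" field: a 31-bit signed offset stored relative to the address of the field itself. Shifting left by one and then right by one copies bit 30 into bit 31, sign-extending the value (the kernel relies on GCC's arithmetic right shift of signed longs here). A minimal standalone C sketch of the same decoding, with an illustrative value:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a prel31 field located at address 'where' holding 'raw'. */
    static uintptr_t prel31_decode(uintptr_t where, uint32_t raw)
    {
            /* An arithmetic right shift copies bit 30 into bit 31,
             * i.e. sign-extends the 31-bit value (GCC semantics). */
            int32_t offset = (int32_t)(raw << 1) >> 1;
            return where + (intptr_t)offset;
    }

    int main(void)
    {
            /* 0x7ffffffc is -4 as a 31-bit two's complement value. */
            printf("%lx\n", (unsigned long)prel31_decode(0x1000, 0x7ffffffc));
            return 0;   /* prints "ffc", i.e. 0x1000 - 4 */
    }
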
106 | 106 | ||
107 | /* | 107 | /* |
108 | * Binary search in the unwind index. The entries are | 108 | * Binary search in the unwind index. The entries are |
109 | * guaranteed to be sorted in ascending order by the linker. | 109 | * guaranteed to be sorted in ascending order by the linker. |
110 | * | 110 | * |
111 | * start = first entry | 111 | * start = first entry |
112 | * origin = first entry with positive offset (or stop if there is no such entry) | 112 | * origin = first entry with positive offset (or stop if there is no such entry) |
113 | * stop - 1 = last entry | 113 | * stop - 1 = last entry |
114 | */ | 114 | */ |
115 | static const struct unwind_idx *search_index(unsigned long addr, | 115 | static const struct unwind_idx *search_index(unsigned long addr, |
116 | const struct unwind_idx *start, | 116 | const struct unwind_idx *start, |
117 | const struct unwind_idx *origin, | 117 | const struct unwind_idx *origin, |
118 | const struct unwind_idx *stop) | 118 | const struct unwind_idx *stop) |
119 | { | 119 | { |
120 | unsigned long addr_prel31; | 120 | unsigned long addr_prel31; |
121 | 121 | ||
122 | pr_debug("%s(%08lx, %p, %p, %p)\n", | 122 | pr_debug("%s(%08lx, %p, %p, %p)\n", |
123 | __func__, addr, start, origin, stop); | 123 | __func__, addr, start, origin, stop); |
124 | 124 | ||
125 | /* | 125 | /* |
126 | * only search in the section with the matching sign. This way the | 126 | * only search in the section with the matching sign. This way the |
127 | * prel31 numbers can be compared as unsigned longs. | 127 | * prel31 numbers can be compared as unsigned longs. |
128 | */ | 128 | */ |
129 | if (addr < (unsigned long)start) | 129 | if (addr < (unsigned long)start) |
130 | /* negative offsets: [start; origin) */ | 130 | /* negative offsets: [start; origin) */ |
131 | stop = origin; | 131 | stop = origin; |
132 | else | 132 | else |
133 | /* positive offsets: [origin; stop) */ | 133 | /* positive offsets: [origin; stop) */ |
134 | start = origin; | 134 | start = origin; |
135 | 135 | ||
136 | /* prel31 for address relative to start */ | 136 | /* prel31 for address relative to start */ |
137 | addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff; | 137 | addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff; |
138 | 138 | ||
139 | while (start < stop - 1) { | 139 | while (start < stop - 1) { |
140 | const struct unwind_idx *mid = start + ((stop - start) >> 1); | 140 | const struct unwind_idx *mid = start + ((stop - start) >> 1); |
141 | 141 | ||
142 | /* | 142 | /* |
143 | * As addr_prel31 is relative to start an offset is needed to | 143 | * As addr_prel31 is relative to start an offset is needed to |
144 | * make it relative to mid. | 144 | * make it relative to mid. |
145 | */ | 145 | */ |
146 | if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) < | 146 | if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) < |
147 | mid->addr_offset) | 147 | mid->addr_offset) |
148 | stop = mid; | 148 | stop = mid; |
149 | else { | 149 | else { |
150 | /* keep addr_prel31 relative to start */ | 150 | /* keep addr_prel31 relative to start */ |
151 | addr_prel31 -= ((unsigned long)mid - | 151 | addr_prel31 -= ((unsigned long)mid - |
152 | (unsigned long)start); | 152 | (unsigned long)start); |
153 | start = mid; | 153 | start = mid; |
154 | } | 154 | } |
155 | } | 155 | } |
156 | 156 | ||
157 | if (likely(start->addr_offset <= addr_prel31)) | 157 | if (likely(start->addr_offset <= addr_prel31)) |
158 | return start; | 158 | return start; |
159 | else { | 159 | else { |
160 | pr_warning("unwind: Unknown symbol address %08lx\n", addr); | 160 | pr_warning("unwind: Unknown symbol address %08lx\n", addr); |
161 | return NULL; | 161 | return NULL; |
162 | } | 162 | } |
163 | } | 163 | } |
164 | 164 | ||
165 | static const struct unwind_idx *unwind_find_origin( | 165 | static const struct unwind_idx *unwind_find_origin( |
166 | const struct unwind_idx *start, const struct unwind_idx *stop) | 166 | const struct unwind_idx *start, const struct unwind_idx *stop) |
167 | { | 167 | { |
168 | pr_debug("%s(%p, %p)\n", __func__, start, stop); | 168 | pr_debug("%s(%p, %p)\n", __func__, start, stop); |
169 | while (start < stop) { | 169 | while (start < stop) { |
170 | const struct unwind_idx *mid = start + ((stop - start) >> 1); | 170 | const struct unwind_idx *mid = start + ((stop - start) >> 1); |
171 | 171 | ||
172 | if (mid->addr_offset >= 0x40000000) | 172 | if (mid->addr_offset >= 0x40000000) |
173 | /* negative offset */ | 173 | /* negative offset */ |
174 | start = mid + 1; | 174 | start = mid + 1; |
175 | else | 175 | else |
176 | /* positive offset */ | 176 | /* positive offset */ |
177 | stop = mid; | 177 | stop = mid; |
178 | } | 178 | } |
179 | pr_debug("%s -> %p\n", __func__, stop); | 179 | pr_debug("%s -> %p\n", __func__, stop); |
180 | return stop; | 180 | return stop; |
181 | } | 181 | } |
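
Both searches exploit the prel31 encoding: a raw word with bit 30 set decodes to a negative offset, so any value >= 0x40000000 marks an entry whose target function lies below the entry itself. unwind_find_origin() is a lower-bound search for the first "positive" entry, and search_index() then confines itself to one sign partition so raw prel31 values can be compared as plain unsigned longs. A self-contained C model of the origin search (illustrative array type, not kernel code):

    #include <stddef.h>
    #include <stdint.h>

    /* Return the index of the first entry whose prel31 offset is positive,
     * or n if every entry is negative - mirroring unwind_find_origin(). */
    static size_t find_origin(const uint32_t *offsets, size_t n)
    {
            size_t lo = 0, hi = n;

            while (lo < hi) {
                    size_t mid = lo + (hi - lo) / 2;

                    if (offsets[mid] >= 0x40000000)   /* bit 30 set: negative */
                            lo = mid + 1;
                    else
                            hi = mid;
            }
            return hi;
    }
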
182 | 182 | ||
183 | static const struct unwind_idx *unwind_find_idx(unsigned long addr) | 183 | static const struct unwind_idx *unwind_find_idx(unsigned long addr) |
184 | { | 184 | { |
185 | const struct unwind_idx *idx = NULL; | 185 | const struct unwind_idx *idx = NULL; |
186 | unsigned long flags; | 186 | unsigned long flags; |
187 | 187 | ||
188 | pr_debug("%s(%08lx)\n", __func__, addr); | 188 | pr_debug("%s(%08lx)\n", __func__, addr); |
189 | 189 | ||
190 | if (core_kernel_text(addr)) { | 190 | if (core_kernel_text(addr)) { |
191 | if (unlikely(!__origin_unwind_idx)) | 191 | if (unlikely(!__origin_unwind_idx)) |
192 | __origin_unwind_idx = | 192 | __origin_unwind_idx = |
193 | unwind_find_origin(__start_unwind_idx, | 193 | unwind_find_origin(__start_unwind_idx, |
194 | __stop_unwind_idx); | 194 | __stop_unwind_idx); |
195 | 195 | ||
196 | /* main unwind table */ | 196 | /* main unwind table */ |
197 | idx = search_index(addr, __start_unwind_idx, | 197 | idx = search_index(addr, __start_unwind_idx, |
198 | __origin_unwind_idx, | 198 | __origin_unwind_idx, |
199 | __stop_unwind_idx); | 199 | __stop_unwind_idx); |
200 | } else { | 200 | } else { |
201 | /* module unwind tables */ | 201 | /* module unwind tables */ |
202 | struct unwind_table *table; | 202 | struct unwind_table *table; |
203 | 203 | ||
204 | spin_lock_irqsave(&unwind_lock, flags); | 204 | spin_lock_irqsave(&unwind_lock, flags); |
205 | list_for_each_entry(table, &unwind_tables, list) { | 205 | list_for_each_entry(table, &unwind_tables, list) { |
206 | if (addr >= table->begin_addr && | 206 | if (addr >= table->begin_addr && |
207 | addr < table->end_addr) { | 207 | addr < table->end_addr) { |
208 | idx = search_index(addr, table->start, | 208 | idx = search_index(addr, table->start, |
209 | table->origin, | 209 | table->origin, |
210 | table->stop); | 210 | table->stop); |
211 | /* Move-to-front to exploit common traces */ | 211 | /* Move-to-front to exploit common traces */ |
212 | list_move(&table->list, &unwind_tables); | 212 | list_move(&table->list, &unwind_tables); |
213 | break; | 213 | break; |
214 | } | 214 | } |
215 | } | 215 | } |
216 | spin_unlock_irqrestore(&unwind_lock, flags); | 216 | spin_unlock_irqrestore(&unwind_lock, flags); |
217 | } | 217 | } |
218 | 218 | ||
219 | pr_debug("%s: idx = %p\n", __func__, idx); | 219 | pr_debug("%s: idx = %p\n", __func__, idx); |
220 | return idx; | 220 | return idx; |
221 | } | 221 | } |
222 | 222 | ||
223 | static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl) | 223 | static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl) |
224 | { | 224 | { |
225 | unsigned long ret; | 225 | unsigned long ret; |
226 | 226 | ||
227 | if (ctrl->entries <= 0) { | 227 | if (ctrl->entries <= 0) { |
228 | pr_warning("unwind: Corrupt unwind table\n"); | 228 | pr_warning("unwind: Corrupt unwind table\n"); |
229 | return 0; | 229 | return 0; |
230 | } | 230 | } |
231 | 231 | ||
232 | ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff; | 232 | ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff; |
233 | 233 | ||
234 | if (ctrl->byte == 0) { | 234 | if (ctrl->byte == 0) { |
235 | ctrl->insn++; | 235 | ctrl->insn++; |
236 | ctrl->entries--; | 236 | ctrl->entries--; |
237 | ctrl->byte = 3; | 237 | ctrl->byte = 3; |
238 | } else | 238 | } else |
239 | ctrl->byte--; | 239 | ctrl->byte--; |
240 | 240 | ||
241 | return ret; | 241 | return ret; |
242 | } | 242 | } |
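
unwind_get_byte() streams opcodes out of each 32-bit word from the most significant byte downwards: ctrl->byte counts 3, 2, 1, 0, and on wrap the word pointer advances and one table entry is consumed. (For the first word, unwind_frame() below starts ctrl->byte at 2 or 1 because the leading bytes hold the personality header.) A small sketch of the same traversal over a hypothetical buffer:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* One packed word; bytes come out high to low:
             * 0x80, 0x0f, 0xb0, 0xb0 in that order. */
            const uint32_t words[] = { 0x800fb0b0 };
            const uint32_t *insn = words;
            int byte = 3, entries = 1;

            while (entries > 0) {
                    uint8_t op = (*insn >> (byte * 8)) & 0xff;

                    printf("%02x\n", op);
                    if (byte == 0) {        /* word exhausted: advance */
                            insn++;
                            entries--;
                            byte = 3;
                    } else {
                            byte--;
                    }
            }
            return 0;
    }
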
243 | 243 | ||
244 | /* Before popping a register, check whether it is feasible or not */ | 244 | /* Before popping a register, check whether it is feasible or not */ |
245 | static int unwind_pop_register(struct unwind_ctrl_block *ctrl, | 245 | static int unwind_pop_register(struct unwind_ctrl_block *ctrl, |
246 | unsigned long **vsp, unsigned int reg) | 246 | unsigned long **vsp, unsigned int reg) |
247 | { | 247 | { |
248 | if (unlikely(ctrl->check_each_pop)) | 248 | if (unlikely(ctrl->check_each_pop)) |
249 | if (*vsp >= (unsigned long *)ctrl->sp_high) | 249 | if (*vsp >= (unsigned long *)ctrl->sp_high) |
250 | return -URC_FAILURE; | 250 | return -URC_FAILURE; |
251 | 251 | ||
252 | ctrl->vrs[reg] = *(*vsp)++; | 252 | ctrl->vrs[reg] = *(*vsp)++; |
253 | return URC_OK; | 253 | return URC_OK; |
254 | } | 254 | } |
255 | 255 | ||
256 | /* Helper functions to execute the instructions */ | 256 | /* Helper functions to execute the instructions */ |
257 | static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl, | 257 | static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl, |
258 | unsigned long mask) | 258 | unsigned long mask) |
259 | { | 259 | { |
260 | unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; | 260 | unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; |
261 | int load_sp, reg = 4; | 261 | int load_sp, reg = 4; |
262 | 262 | ||
263 | load_sp = mask & (1 << (13 - 4)); | 263 | load_sp = mask & (1 << (13 - 4)); |
264 | while (mask) { | 264 | while (mask) { |
265 | if (mask & 1) | 265 | if (mask & 1) |
266 | if (unwind_pop_register(ctrl, &vsp, reg)) | 266 | if (unwind_pop_register(ctrl, &vsp, reg)) |
267 | return -URC_FAILURE; | 267 | return -URC_FAILURE; |
268 | mask >>= 1; | 268 | mask >>= 1; |
269 | reg++; | 269 | reg++; |
270 | } | 270 | } |
271 | if (!load_sp) | 271 | if (!load_sp) |
272 | ctrl->vrs[SP] = (unsigned long)vsp; | 272 | ctrl->vrs[SP] = (unsigned long)vsp; |
273 | 273 | ||
274 | return URC_OK; | 274 | return URC_OK; |
275 | } | 275 | } |
276 | 276 | ||
277 | static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl, | 277 | static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl, |
278 | unsigned long insn) | 278 | unsigned long insn) |
279 | { | 279 | { |
280 | unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; | 280 | unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; |
281 | int reg; | 281 | int reg; |
282 | 282 | ||
283 | /* pop R4-R[4+bbb] */ | 283 | /* pop R4-R[4+bbb] */ |
284 | for (reg = 4; reg <= 4 + (insn & 7); reg++) | 284 | for (reg = 4; reg <= 4 + (insn & 7); reg++) |
285 | if (unwind_pop_register(ctrl, &vsp, reg)) | 285 | if (unwind_pop_register(ctrl, &vsp, reg)) |
286 | return -URC_FAILURE; | 286 | return -URC_FAILURE; |
287 | 287 | ||
288 | if (insn & 0x80) | 288 | if (insn & 0x8) |
289 | if (unwind_pop_register(ctrl, &vsp, 14)) | 289 | if (unwind_pop_register(ctrl, &vsp, 14)) |
290 | return -URC_FAILURE; | 290 | return -URC_FAILURE; |
291 | 291 | ||
292 | ctrl->vrs[SP] = (unsigned long)vsp; | 292 | ctrl->vrs[SP] = (unsigned long)vsp; |
293 | 293 | ||
294 | return URC_OK; | 294 | return URC_OK; |
295 | } | 295 | } |
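
This function is where the ARM: 8052/1 fix lands. In the EHABI encoding, 10100nnn means "pop r4-r[4+nnn]" and 10101nnn means "pop r4-r[4+nnn], r14": within the opcode byte, bit 3 (0x08) is the r14 flag, while bit 7 (0x80) belongs to the 0xa0 group prefix and is set for every opcode routed here. The old test, insn & 0x80, was therefore always true, so r14 was popped unconditionally and the unwound LR and SP went wrong. A tiny standalone check of the corrected test:

    #include <assert.h>

    int main(void)
    {
            unsigned int pop_r4_r7     = 0xa3;  /* 10100011: no r14 */
            unsigned int pop_r4_r5_r14 = 0xa9;  /* 10101001: with r14 */

            assert((pop_r4_r7 & 0xf0) == 0xa0); /* both are in this group */
            assert(pop_r4_r7 & 0x80);           /* old test: true anyway... */
            assert(!(pop_r4_r7 & 0x8));         /* ...but bit 3 says no r14 */
            assert(pop_r4_r5_r14 & 0x8);        /* here r14 really is popped */
            return 0;
    }
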
296 | 296 | ||
297 | static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl, | 297 | static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl, |
298 | unsigned long mask) | 298 | unsigned long mask) |
299 | { | 299 | { |
300 | unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; | 300 | unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; |
301 | int reg = 0; | 301 | int reg = 0; |
302 | 302 | ||
303 | /* pop R0-R3 according to mask */ | 303 | /* pop R0-R3 according to mask */ |
304 | while (mask) { | 304 | while (mask) { |
305 | if (mask & 1) | 305 | if (mask & 1) |
306 | if (unwind_pop_register(ctrl, &vsp, reg)) | 306 | if (unwind_pop_register(ctrl, &vsp, reg)) |
307 | return -URC_FAILURE; | 307 | return -URC_FAILURE; |
308 | mask >>= 1; | 308 | mask >>= 1; |
309 | reg++; | 309 | reg++; |
310 | } | 310 | } |
311 | ctrl->vrs[SP] = (unsigned long)vsp; | 311 | ctrl->vrs[SP] = (unsigned long)vsp; |
312 | 312 | ||
313 | return URC_OK; | 313 | return URC_OK; |
314 | } | 314 | } |
315 | 315 | ||
316 | /* | 316 | /* |
317 | * Execute the current unwind instruction. | 317 | * Execute the current unwind instruction. |
318 | */ | 318 | */ |
319 | static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) | 319 | static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) |
320 | { | 320 | { |
321 | unsigned long insn = unwind_get_byte(ctrl); | 321 | unsigned long insn = unwind_get_byte(ctrl); |
322 | int ret = URC_OK; | 322 | int ret = URC_OK; |
323 | 323 | ||
324 | pr_debug("%s: insn = %08lx\n", __func__, insn); | 324 | pr_debug("%s: insn = %08lx\n", __func__, insn); |
325 | 325 | ||
326 | if ((insn & 0xc0) == 0x00) | 326 | if ((insn & 0xc0) == 0x00) |
327 | ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4; | 327 | ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4; |
328 | else if ((insn & 0xc0) == 0x40) | 328 | else if ((insn & 0xc0) == 0x40) |
329 | ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4; | 329 | ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4; |
330 | else if ((insn & 0xf0) == 0x80) { | 330 | else if ((insn & 0xf0) == 0x80) { |
331 | unsigned long mask; | 331 | unsigned long mask; |
332 | 332 | ||
333 | insn = (insn << 8) | unwind_get_byte(ctrl); | 333 | insn = (insn << 8) | unwind_get_byte(ctrl); |
334 | mask = insn & 0x0fff; | 334 | mask = insn & 0x0fff; |
335 | if (mask == 0) { | 335 | if (mask == 0) { |
336 | pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n", | 336 | pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n", |
337 | insn); | 337 | insn); |
338 | return -URC_FAILURE; | 338 | return -URC_FAILURE; |
339 | } | 339 | } |
340 | 340 | ||
341 | ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask); | 341 | ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask); |
342 | if (ret) | 342 | if (ret) |
343 | goto error; | 343 | goto error; |
344 | } else if ((insn & 0xf0) == 0x90 && | 344 | } else if ((insn & 0xf0) == 0x90 && |
345 | (insn & 0x0d) != 0x0d) | 345 | (insn & 0x0d) != 0x0d) |
346 | ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f]; | 346 | ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f]; |
347 | else if ((insn & 0xf0) == 0xa0) { | 347 | else if ((insn & 0xf0) == 0xa0) { |
348 | ret = unwind_exec_pop_r4_to_rN(ctrl, insn); | 348 | ret = unwind_exec_pop_r4_to_rN(ctrl, insn); |
349 | if (ret) | 349 | if (ret) |
350 | goto error; | 350 | goto error; |
351 | } else if (insn == 0xb0) { | 351 | } else if (insn == 0xb0) { |
352 | if (ctrl->vrs[PC] == 0) | 352 | if (ctrl->vrs[PC] == 0) |
353 | ctrl->vrs[PC] = ctrl->vrs[LR]; | 353 | ctrl->vrs[PC] = ctrl->vrs[LR]; |
354 | /* no further processing */ | 354 | /* no further processing */ |
355 | ctrl->entries = 0; | 355 | ctrl->entries = 0; |
356 | } else if (insn == 0xb1) { | 356 | } else if (insn == 0xb1) { |
357 | unsigned long mask = unwind_get_byte(ctrl); | 357 | unsigned long mask = unwind_get_byte(ctrl); |
358 | 358 | ||
359 | if (mask == 0 || mask & 0xf0) { | 359 | if (mask == 0 || mask & 0xf0) { |
360 | pr_warning("unwind: Spare encoding %04lx\n", | 360 | pr_warning("unwind: Spare encoding %04lx\n", |
361 | (insn << 8) | mask); | 361 | (insn << 8) | mask); |
362 | return -URC_FAILURE; | 362 | return -URC_FAILURE; |
363 | } | 363 | } |
364 | 364 | ||
365 | ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask); | 365 | ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask); |
366 | if (ret) | 366 | if (ret) |
367 | goto error; | 367 | goto error; |
368 | } else if (insn == 0xb2) { | 368 | } else if (insn == 0xb2) { |
369 | unsigned long uleb128 = unwind_get_byte(ctrl); | 369 | unsigned long uleb128 = unwind_get_byte(ctrl); |
370 | 370 | ||
371 | ctrl->vrs[SP] += 0x204 + (uleb128 << 2); | 371 | ctrl->vrs[SP] += 0x204 + (uleb128 << 2); |
372 | } else { | 372 | } else { |
373 | pr_warning("unwind: Unhandled instruction %02lx\n", insn); | 373 | pr_warning("unwind: Unhandled instruction %02lx\n", insn); |
374 | return -URC_FAILURE; | 374 | return -URC_FAILURE; |
375 | } | 375 | } |
376 | 376 | ||
377 | pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__, | 377 | pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__, |
378 | ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]); | 378 | ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]); |
379 | 379 | ||
380 | error: | 380 | error: |
381 | return ret; | 381 | return ret; |
382 | } | 382 | } |
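
A worked example ties the cases together. For a function whose prologue were "push {r4, r5, lr}" followed by "sub sp, sp, #8" (a typical shape, not taken from this file), the unwind opcodes executed here, byte by byte, would be:

    0x01    vsp += (1 << 2) + 4 = 8    @ undo "sub sp, sp, #8"
    0xa9    pop {r4, r5, r14}          @ undo "push {r4, r5, lr}"
    0xb0    finish                     @ vrs[PC] = vrs[LR] if still unset
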
383 | 383 | ||
384 | /* | 384 | /* |
385 | * Unwind a single frame starting with frame->sp for the symbol at | 385 | * Unwind a single frame starting with frame->sp for the symbol at |
386 | * frame->pc, updating the frame with the new register values. | 386 | * frame->pc, updating the frame with the new register values. |
387 | */ | 387 | */ |
388 | int unwind_frame(struct stackframe *frame) | 388 | int unwind_frame(struct stackframe *frame) |
389 | { | 389 | { |
390 | unsigned long low; | 390 | unsigned long low; |
391 | const struct unwind_idx *idx; | 391 | const struct unwind_idx *idx; |
392 | struct unwind_ctrl_block ctrl; | 392 | struct unwind_ctrl_block ctrl; |
393 | 393 | ||
394 | /* store the highest address on the stack to avoid crossing it */ | 394 | /* store the highest address on the stack to avoid crossing it */ |
395 | low = frame->sp; | 395 | low = frame->sp; |
396 | ctrl.sp_high = ALIGN(low, THREAD_SIZE); | 396 | ctrl.sp_high = ALIGN(low, THREAD_SIZE); |
397 | 397 | ||
398 | pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, | 398 | pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, |
399 | frame->pc, frame->lr, frame->sp); | 399 | frame->pc, frame->lr, frame->sp); |
400 | 400 | ||
401 | if (!kernel_text_address(frame->pc)) | 401 | if (!kernel_text_address(frame->pc)) |
402 | return -URC_FAILURE; | 402 | return -URC_FAILURE; |
403 | 403 | ||
404 | idx = unwind_find_idx(frame->pc); | 404 | idx = unwind_find_idx(frame->pc); |
405 | if (!idx) { | 405 | if (!idx) { |
406 | pr_warning("unwind: Index not found %08lx\n", frame->pc); | 406 | pr_warning("unwind: Index not found %08lx\n", frame->pc); |
407 | return -URC_FAILURE; | 407 | return -URC_FAILURE; |
408 | } | 408 | } |
409 | 409 | ||
410 | ctrl.vrs[FP] = frame->fp; | 410 | ctrl.vrs[FP] = frame->fp; |
411 | ctrl.vrs[SP] = frame->sp; | 411 | ctrl.vrs[SP] = frame->sp; |
412 | ctrl.vrs[LR] = frame->lr; | 412 | ctrl.vrs[LR] = frame->lr; |
413 | ctrl.vrs[PC] = 0; | 413 | ctrl.vrs[PC] = 0; |
414 | 414 | ||
415 | if (idx->insn == 1) | 415 | if (idx->insn == 1) |
416 | /* can't unwind */ | 416 | /* can't unwind */ |
417 | return -URC_FAILURE; | 417 | return -URC_FAILURE; |
418 | else if ((idx->insn & 0x80000000) == 0) | 418 | else if ((idx->insn & 0x80000000) == 0) |
419 | /* prel31 to the unwind table */ | 419 | /* prel31 to the unwind table */ |
420 | ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn); | 420 | ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn); |
421 | else if ((idx->insn & 0xff000000) == 0x80000000) | 421 | else if ((idx->insn & 0xff000000) == 0x80000000) |
422 | /* only personality routine 0 supported in the index */ | 422 | /* only personality routine 0 supported in the index */ |
423 | ctrl.insn = &idx->insn; | 423 | ctrl.insn = &idx->insn; |
424 | else { | 424 | else { |
425 | pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n", | 425 | pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n", |
426 | idx->insn, idx); | 426 | idx->insn, idx); |
427 | return -URC_FAILURE; | 427 | return -URC_FAILURE; |
428 | } | 428 | } |
429 | 429 | ||
430 | /* check the personality routine */ | 430 | /* check the personality routine */ |
431 | if ((*ctrl.insn & 0xff000000) == 0x80000000) { | 431 | if ((*ctrl.insn & 0xff000000) == 0x80000000) { |
432 | ctrl.byte = 2; | 432 | ctrl.byte = 2; |
433 | ctrl.entries = 1; | 433 | ctrl.entries = 1; |
434 | } else if ((*ctrl.insn & 0xff000000) == 0x81000000) { | 434 | } else if ((*ctrl.insn & 0xff000000) == 0x81000000) { |
435 | ctrl.byte = 1; | 435 | ctrl.byte = 1; |
436 | ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); | 436 | ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); |
437 | } else { | 437 | } else { |
438 | pr_warning("unwind: Unsupported personality routine %08lx at %p\n", | 438 | pr_warning("unwind: Unsupported personality routine %08lx at %p\n", |
439 | *ctrl.insn, ctrl.insn); | 439 | *ctrl.insn, ctrl.insn); |
440 | return -URC_FAILURE; | 440 | return -URC_FAILURE; |
441 | } | 441 | } |
442 | 442 | ||
443 | ctrl.check_each_pop = 0; | 443 | ctrl.check_each_pop = 0; |
444 | 444 | ||
445 | while (ctrl.entries > 0) { | 445 | while (ctrl.entries > 0) { |
446 | int urc; | 446 | int urc; |
447 | if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs)) | 447 | if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs)) |
448 | ctrl.check_each_pop = 1; | 448 | ctrl.check_each_pop = 1; |
449 | urc = unwind_exec_insn(&ctrl); | 449 | urc = unwind_exec_insn(&ctrl); |
450 | if (urc < 0) | 450 | if (urc < 0) |
451 | return urc; | 451 | return urc; |
452 | if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high) | 452 | if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high) |
453 | return -URC_FAILURE; | 453 | return -URC_FAILURE; |
454 | } | 454 | } |
455 | 455 | ||
456 | if (ctrl.vrs[PC] == 0) | 456 | if (ctrl.vrs[PC] == 0) |
457 | ctrl.vrs[PC] = ctrl.vrs[LR]; | 457 | ctrl.vrs[PC] = ctrl.vrs[LR]; |
458 | 458 | ||
459 | /* check for infinite loop */ | 459 | /* check for infinite loop */ |
460 | if (frame->pc == ctrl.vrs[PC]) | 460 | if (frame->pc == ctrl.vrs[PC]) |
461 | return -URC_FAILURE; | 461 | return -URC_FAILURE; |
462 | 462 | ||
463 | frame->fp = ctrl.vrs[FP]; | 463 | frame->fp = ctrl.vrs[FP]; |
464 | frame->sp = ctrl.vrs[SP]; | 464 | frame->sp = ctrl.vrs[SP]; |
465 | frame->lr = ctrl.vrs[LR]; | 465 | frame->lr = ctrl.vrs[LR]; |
466 | frame->pc = ctrl.vrs[PC]; | 466 | frame->pc = ctrl.vrs[PC]; |
467 | 467 | ||
468 | return URC_OK; | 468 | return URC_OK; |
469 | } | 469 | } |
470 | 470 | ||
471 | void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) | 471 | void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) |
472 | { | 472 | { |
473 | struct stackframe frame; | 473 | struct stackframe frame; |
474 | register unsigned long current_sp asm ("sp"); | 474 | register unsigned long current_sp asm ("sp"); |
475 | 475 | ||
476 | pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); | 476 | pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); |
477 | 477 | ||
478 | if (!tsk) | 478 | if (!tsk) |
479 | tsk = current; | 479 | tsk = current; |
480 | 480 | ||
481 | if (regs) { | 481 | if (regs) { |
482 | frame.fp = regs->ARM_fp; | 482 | frame.fp = regs->ARM_fp; |
483 | frame.sp = regs->ARM_sp; | 483 | frame.sp = regs->ARM_sp; |
484 | frame.lr = regs->ARM_lr; | 484 | frame.lr = regs->ARM_lr; |
485 | /* PC might be corrupted, use LR in that case. */ | 485 | /* PC might be corrupted, use LR in that case. */ |
486 | frame.pc = kernel_text_address(regs->ARM_pc) | 486 | frame.pc = kernel_text_address(regs->ARM_pc) |
487 | ? regs->ARM_pc : regs->ARM_lr; | 487 | ? regs->ARM_pc : regs->ARM_lr; |
488 | } else if (tsk == current) { | 488 | } else if (tsk == current) { |
489 | frame.fp = (unsigned long)__builtin_frame_address(0); | 489 | frame.fp = (unsigned long)__builtin_frame_address(0); |
490 | frame.sp = current_sp; | 490 | frame.sp = current_sp; |
491 | frame.lr = (unsigned long)__builtin_return_address(0); | 491 | frame.lr = (unsigned long)__builtin_return_address(0); |
492 | frame.pc = (unsigned long)unwind_backtrace; | 492 | frame.pc = (unsigned long)unwind_backtrace; |
493 | } else { | 493 | } else { |
494 | /* task blocked in __switch_to */ | 494 | /* task blocked in __switch_to */ |
495 | frame.fp = thread_saved_fp(tsk); | 495 | frame.fp = thread_saved_fp(tsk); |
496 | frame.sp = thread_saved_sp(tsk); | 496 | frame.sp = thread_saved_sp(tsk); |
497 | /* | 497 | /* |
498 | * The function calling __switch_to cannot be a leaf function | 498 | * The function calling __switch_to cannot be a leaf function |
499 | * so LR is recovered from the stack. | 499 | * so LR is recovered from the stack. |
500 | */ | 500 | */ |
501 | frame.lr = 0; | 501 | frame.lr = 0; |
502 | frame.pc = thread_saved_pc(tsk); | 502 | frame.pc = thread_saved_pc(tsk); |
503 | } | 503 | } |
504 | 504 | ||
505 | while (1) { | 505 | while (1) { |
506 | int urc; | 506 | int urc; |
507 | unsigned long where = frame.pc; | 507 | unsigned long where = frame.pc; |
508 | 508 | ||
509 | urc = unwind_frame(&frame); | 509 | urc = unwind_frame(&frame); |
510 | if (urc < 0) | 510 | if (urc < 0) |
511 | break; | 511 | break; |
512 | dump_backtrace_entry(where, frame.pc, frame.sp - 4); | 512 | dump_backtrace_entry(where, frame.pc, frame.sp - 4); |
513 | } | 513 | } |
514 | } | 514 | } |
515 | 515 | ||
516 | struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, | 516 | struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, |
517 | unsigned long text_addr, | 517 | unsigned long text_addr, |
518 | unsigned long text_size) | 518 | unsigned long text_size) |
519 | { | 519 | { |
520 | unsigned long flags; | 520 | unsigned long flags; |
521 | struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL); | 521 | struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL); |
522 | 522 | ||
523 | pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size, | 523 | pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size, |
524 | text_addr, text_size); | 524 | text_addr, text_size); |
525 | 525 | ||
526 | if (!tab) | 526 | if (!tab) |
527 | return tab; | 527 | return tab; |
528 | 528 | ||
529 | tab->start = (const struct unwind_idx *)start; | 529 | tab->start = (const struct unwind_idx *)start; |
530 | tab->stop = (const struct unwind_idx *)(start + size); | 530 | tab->stop = (const struct unwind_idx *)(start + size); |
531 | tab->origin = unwind_find_origin(tab->start, tab->stop); | 531 | tab->origin = unwind_find_origin(tab->start, tab->stop); |
532 | tab->begin_addr = text_addr; | 532 | tab->begin_addr = text_addr; |
533 | tab->end_addr = text_addr + text_size; | 533 | tab->end_addr = text_addr + text_size; |
534 | 534 | ||
535 | spin_lock_irqsave(&unwind_lock, flags); | 535 | spin_lock_irqsave(&unwind_lock, flags); |
536 | list_add_tail(&tab->list, &unwind_tables); | 536 | list_add_tail(&tab->list, &unwind_tables); |
537 | spin_unlock_irqrestore(&unwind_lock, flags); | 537 | spin_unlock_irqrestore(&unwind_lock, flags); |
538 | 538 | ||
539 | return tab; | 539 | return tab; |
540 | } | 540 | } |
541 | 541 | ||
542 | void unwind_table_del(struct unwind_table *tab) | 542 | void unwind_table_del(struct unwind_table *tab) |
543 | { | 543 | { |
544 | unsigned long flags; | 544 | unsigned long flags; |
545 | 545 | ||
546 | if (!tab) | 546 | if (!tab) |
547 | return; | 547 | return; |
548 | 548 | ||
549 | spin_lock_irqsave(&unwind_lock, flags); | 549 | spin_lock_irqsave(&unwind_lock, flags); |
550 | list_del(&tab->list); | 550 | list_del(&tab->list); |
551 | spin_unlock_irqrestore(&unwind_lock, flags); | 551 | spin_unlock_irqrestore(&unwind_lock, flags); |
552 | 552 | ||
553 | kfree(tab); | 553 | kfree(tab); |
554 | } | 554 | } |
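
unwind_table_add() and unwind_table_del() are the hooks used when code with its own .ARM.exidx section comes and goes, typically at module load and unload; the tables they maintain are what the module branch of unwind_find_idx() walks. A hedged sketch of the call pattern (the exidx_*/text_* parameters are illustrative, not the actual module loader code):

    /* Sketch: register one module's unwind index with the unwinder.
     * Returns NULL if the bookkeeping allocation fails. */
    static struct unwind_table *register_module_exidx(unsigned long exidx_start,
                                                      unsigned long exidx_end,
                                                      unsigned long text_start,
                                                      unsigned long text_end)
    {
            return unwind_table_add(exidx_start, exidx_end - exidx_start,
                                    text_start, text_end - text_start);
    }

The returned pointer is kept by the caller and handed back to unwind_table_del() when the code range disappears.
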
555 | 555 |
1 | /* | 1 | /* |
2 | * linux/arch/arm/mm/proc-v7m.S | 2 | * linux/arch/arm/mm/proc-v7m.S |
3 | * | 3 | * |
4 | * Copyright (C) 2008 ARM Ltd. | 4 | * Copyright (C) 2008 ARM Ltd. |
5 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | 5 | * Copyright (C) 2001 Deep Blue Solutions Ltd. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This is the "shell" of the ARMv7-M processor support. | 11 | * This is the "shell" of the ARMv7-M processor support. |
12 | */ | 12 | */ |
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/v7m.h> | 15 | #include <asm/v7m.h> |
16 | #include "proc-macros.S" | 16 | #include "proc-macros.S" |
17 | 17 | ||
18 | ENTRY(cpu_v7m_proc_init) | 18 | ENTRY(cpu_v7m_proc_init) |
19 | mov pc, lr | 19 | mov pc, lr |
20 | ENDPROC(cpu_v7m_proc_init) | 20 | ENDPROC(cpu_v7m_proc_init) |
21 | 21 | ||
22 | ENTRY(cpu_v7m_proc_fin) | 22 | ENTRY(cpu_v7m_proc_fin) |
23 | mov pc, lr | 23 | mov pc, lr |
24 | ENDPROC(cpu_v7m_proc_fin) | 24 | ENDPROC(cpu_v7m_proc_fin) |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * cpu_v7m_reset(loc) | 27 | * cpu_v7m_reset(loc) |
28 | * | 28 | * |
29 | * Perform a soft reset of the system. Put the CPU into the | 29 | * Perform a soft reset of the system. Put the CPU into the |
30 | * same state as it would be if it had been reset, and branch | 30 | * same state as it would be if it had been reset, and branch |
31 | * to what would be the reset vector. | 31 | * to what would be the reset vector. |
32 | * | 32 | * |
33 | * - loc - location to jump to for soft reset | 33 | * - loc - location to jump to for soft reset |
34 | */ | 34 | */ |
35 | .align 5 | 35 | .align 5 |
36 | ENTRY(cpu_v7m_reset) | 36 | ENTRY(cpu_v7m_reset) |
37 | mov pc, r0 | 37 | mov pc, r0 |
38 | ENDPROC(cpu_v7m_reset) | 38 | ENDPROC(cpu_v7m_reset) |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * cpu_v7m_do_idle() | 41 | * cpu_v7m_do_idle() |
42 | * | 42 | * |
43 | * Idle the processor (eg, wait for interrupt). | 43 | * Idle the processor (eg, wait for interrupt). |
44 | * | 44 | * |
45 | * IRQs are already disabled. | 45 | * IRQs are already disabled. |
46 | */ | 46 | */ |
47 | ENTRY(cpu_v7m_do_idle) | 47 | ENTRY(cpu_v7m_do_idle) |
48 | wfi | 48 | wfi |
49 | mov pc, lr | 49 | mov pc, lr |
50 | ENDPROC(cpu_v7m_do_idle) | 50 | ENDPROC(cpu_v7m_do_idle) |
51 | 51 | ||
52 | ENTRY(cpu_v7m_dcache_clean_area) | 52 | ENTRY(cpu_v7m_dcache_clean_area) |
53 | mov pc, lr | 53 | mov pc, lr |
54 | ENDPROC(cpu_v7m_dcache_clean_area) | 54 | ENDPROC(cpu_v7m_dcache_clean_area) |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * There is no MMU, so there is nothing to do. | 57 | * There is no MMU, so there is nothing to do. |
58 | */ | 58 | */ |
59 | ENTRY(cpu_v7m_switch_mm) | 59 | ENTRY(cpu_v7m_switch_mm) |
60 | mov pc, lr | 60 | mov pc, lr |
61 | ENDPROC(cpu_v7m_switch_mm) | 61 | ENDPROC(cpu_v7m_switch_mm) |
62 | 62 | ||
63 | .globl cpu_v7m_suspend_size | 63 | .globl cpu_v7m_suspend_size |
64 | .equ cpu_v7m_suspend_size, 0 | 64 | .equ cpu_v7m_suspend_size, 0 |
65 | 65 | ||
66 | #ifdef CONFIG_ARM_CPU_SUSPEND | 66 | #ifdef CONFIG_ARM_CPU_SUSPEND |
67 | ENTRY(cpu_v7m_do_suspend) | 67 | ENTRY(cpu_v7m_do_suspend) |
68 | mov pc, lr | 68 | mov pc, lr |
69 | ENDPROC(cpu_v7m_do_suspend) | 69 | ENDPROC(cpu_v7m_do_suspend) |
70 | 70 | ||
71 | ENTRY(cpu_v7m_do_resume) | 71 | ENTRY(cpu_v7m_do_resume) |
72 | mov pc, lr | 72 | mov pc, lr |
73 | ENDPROC(cpu_v7m_do_resume) | 73 | ENDPROC(cpu_v7m_do_resume) |
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | .section ".text.init", #alloc, #execinstr | 76 | .section ".text.init", #alloc, #execinstr |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * __v7m_setup | 79 | * __v7m_setup |
80 | * | 80 | * |
81 | * This should be able to cover all ARMv7-M cores. | 81 | * This should be able to cover all ARMv7-M cores. |
82 | */ | 82 | */ |
83 | __v7m_setup: | 83 | __v7m_setup: |
84 | @ Configure the vector table base address | 84 | @ Configure the vector table base address |
85 | ldr r0, =BASEADDR_V7M_SCB | 85 | ldr r0, =BASEADDR_V7M_SCB |
86 | ldr r12, =vector_table | 86 | ldr r12, =vector_table |
87 | str r12, [r0, V7M_SCB_VTOR] | 87 | str r12, [r0, V7M_SCB_VTOR] |
88 | 88 | ||
89 | @ enable UsageFault, BusFault and MemManage fault. | 89 | @ enable UsageFault, BusFault and MemManage fault. |
90 | ldr r5, [r0, #V7M_SCB_SHCSR] | 90 | ldr r5, [r0, #V7M_SCB_SHCSR] |
91 | orr r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA) | 91 | orr r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA) |
92 | str r5, [r0, #V7M_SCB_SHCSR] | 92 | str r5, [r0, #V7M_SCB_SHCSR] |
93 | 93 | ||
94 | @ Lower the priority of the SVC and PendSV exceptions | 94 | @ Lower the priority of the SVC and PendSV exceptions |
95 | mov r5, #0x80000000 | 95 | mov r5, #0x80000000 |
96 | str r5, [r0, V7M_SCB_SHPR2] @ set SVC priority | 96 | str r5, [r0, V7M_SCB_SHPR2] @ set SVC priority |
97 | mov r5, #0x00800000 | 97 | mov r5, #0x00800000 |
98 | str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority | 98 | str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority |
99 | 99 | ||
100 | @ SVC to run the kernel in this mode | 100 | @ SVC to run the kernel in this mode |
101 | adr r1, BSYM(1f) | 101 | adr r1, BSYM(1f) |
102 | ldr r5, [r12, #11 * 4] @ read the SVC vector entry | 102 | ldr r5, [r12, #11 * 4] @ read the SVC vector entry |
103 | str r1, [r12, #11 * 4] @ write the temporary SVC vector entry | 103 | str r1, [r12, #11 * 4] @ write the temporary SVC vector entry |
104 | mov r6, lr @ save LR | 104 | mov r6, lr @ save LR |
105 | mov r7, sp @ save SP | 105 | mov r7, sp @ save SP |
106 | ldr sp, =__v7m_setup_stack_top | 106 | ldr sp, =__v7m_setup_stack_top |
107 | cpsie i | 107 | cpsie i |
108 | svc #0 | 108 | svc #0 |
109 | 1: cpsid i | 109 | 1: cpsid i |
110 | str r5, [r12, #11 * 4] @ restore the original SVC vector entry | 110 | str r5, [r12, #11 * 4] @ restore the original SVC vector entry |
111 | mov lr, r6 @ restore LR | 111 | mov lr, r6 @ restore LR |
112 | mov sp, r7 @ restore SP | 112 | mov sp, r7 @ restore SP |
113 | 113 | ||
114 | @ Special-purpose control register | 114 | @ Special-purpose control register |
115 | mov r1, #1 | 115 | mov r1, #1 |
116 | msr control, r1 @ Thread mode has unprivileged access | 116 | msr control, r1 @ Thread mode has unprivileged access |
117 | 117 | ||
118 | @ Configure the System Control Register to ensure 8-byte stack alignment | 118 | @ Configure the System Control Register to ensure 8-byte stack alignment |
119 | @ Note the STKALIGN bit is either RW or RAO. | 119 | @ Note the STKALIGN bit is either RW or RAO. |
120 | ldr r12, [r0, V7M_SCB_CCR] @ system control register | 120 | ldr r12, [r0, V7M_SCB_CCR] @ system control register |
121 | orr r12, #V7M_SCB_CCR_STKALIGN | 121 | orr r12, #V7M_SCB_CCR_STKALIGN |
122 | str r12, [r0, V7M_SCB_CCR] | 122 | str r12, [r0, V7M_SCB_CCR] |
123 | mov pc, lr | 123 | mov pc, lr |
124 | ENDPROC(__v7m_setup) | 124 | ENDPROC(__v7m_setup) |
125 | 125 | ||
126 | .align 2 | ||
127 | __v7m_setup_stack: | ||
128 | .space 4 * 8 @ 8 registers | ||
129 | __v7m_setup_stack_top: | ||
130 | |||
126 | define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 | 131 | define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 |
127 | 132 | ||
128 | .section ".rodata" | 133 | .section ".rodata" |
129 | string cpu_arch_name, "armv7m" | 134 | string cpu_arch_name, "armv7m" |
130 | string cpu_elf_name, "v7m" | 135 | string cpu_elf_name, "v7m" |
131 | string cpu_v7m_name, "ARMv7-M" | 136 | string cpu_v7m_name, "ARMv7-M" |
132 | 137 | ||
133 | .section ".proc.info.init", #alloc, #execinstr | 138 | .section ".proc.info.init", #alloc, #execinstr |
134 | 139 | ||
135 | /* | 140 | /* |
136 | * Match any ARMv7-M processor core. | 141 | * Match any ARMv7-M processor core. |
137 | */ | 142 | */ |
138 | .type __v7m_proc_info, #object | 143 | .type __v7m_proc_info, #object |
139 | __v7m_proc_info: | 144 | __v7m_proc_info: |
140 | .long 0x000f0000 @ Required ID value | 145 | .long 0x000f0000 @ Required ID value |
141 | .long 0x000f0000 @ Mask for ID | 146 | .long 0x000f0000 @ Mask for ID |
142 | .long 0 @ proc_info_list.__cpu_mm_mmu_flags | 147 | .long 0 @ proc_info_list.__cpu_mm_mmu_flags |
143 | .long 0 @ proc_info_list.__cpu_io_mmu_flags | 148 | .long 0 @ proc_info_list.__cpu_io_mmu_flags |
144 | b __v7m_setup @ proc_info_list.__cpu_flush | 149 | b __v7m_setup @ proc_info_list.__cpu_flush |
145 | .long cpu_arch_name | 150 | .long cpu_arch_name |
146 | .long cpu_elf_name | 151 | .long cpu_elf_name |
147 | .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT | 152 | .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT |
148 | .long cpu_v7m_name | 153 | .long cpu_v7m_name |
149 | .long v7m_processor_functions @ proc_info_list.proc | 154 | .long v7m_processor_functions @ proc_info_list.proc |
150 | .long 0 @ proc_info_list.tlb | 155 | .long 0 @ proc_info_list.tlb |
151 | .long 0 @ proc_info_list.user | 156 | .long 0 @ proc_info_list.user |
152 | .long nop_cache_fns @ proc_info_list.cache | 157 | .long nop_cache_fns @ proc_info_list.cache |
153 | .size __v7m_proc_info, . - __v7m_proc_info | 158 | .size __v7m_proc_info, . - __v7m_proc_info |
154 | |||
155 | __v7m_setup_stack: |
1 | /* | 1 | /* |
2 | * linux/include/amba/bus.h | 2 | * linux/include/amba/bus.h |
3 | * | 3 | * |
4 | * This device type deals with ARM PrimeCells and anything else that | 4 | * This device type deals with ARM PrimeCells and anything else that |
5 | * presents a proper CID (0xB105F00D) at the end of the I/O register | 5 | * presents a proper CID (0xB105F00D) at the end of the I/O register |
6 | * region or that is derived from a PrimeCell. | 6 | * region or that is derived from a PrimeCell. |
7 | * | 7 | * |
8 | * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. | 8 | * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | #ifndef ASMARM_AMBA_H | 14 | #ifndef ASMARM_AMBA_H |
15 | #define ASMARM_AMBA_H | 15 | #define ASMARM_AMBA_H |
16 | 16 | ||
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/mod_devicetable.h> | 19 | #include <linux/mod_devicetable.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/resource.h> | 21 | #include <linux/resource.h> |
22 | #include <linux/regulator/consumer.h> | 22 | #include <linux/regulator/consumer.h> |
23 | 23 | ||
24 | #define AMBA_NR_IRQS 9 | 24 | #define AMBA_NR_IRQS 9 |
25 | #define AMBA_CID 0xb105f00d | 25 | #define AMBA_CID 0xb105f00d |
26 | 26 | ||
27 | struct clk; | 27 | struct clk; |
28 | 28 | ||
29 | struct amba_device { | 29 | struct amba_device { |
30 | struct device dev; | 30 | struct device dev; |
31 | struct resource res; | 31 | struct resource res; |
32 | struct clk *pclk; | 32 | struct clk *pclk; |
33 | unsigned int periphid; | 33 | unsigned int periphid; |
34 | unsigned int irq[AMBA_NR_IRQS]; | 34 | unsigned int irq[AMBA_NR_IRQS]; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct amba_driver { | 37 | struct amba_driver { |
38 | struct device_driver drv; | 38 | struct device_driver drv; |
39 | int (*probe)(struct amba_device *, const struct amba_id *); | 39 | int (*probe)(struct amba_device *, const struct amba_id *); |
40 | int (*remove)(struct amba_device *); | 40 | int (*remove)(struct amba_device *); |
41 | void (*shutdown)(struct amba_device *); | 41 | void (*shutdown)(struct amba_device *); |
42 | int (*suspend)(struct amba_device *, pm_message_t); | 42 | int (*suspend)(struct amba_device *, pm_message_t); |
43 | int (*resume)(struct amba_device *); | 43 | int (*resume)(struct amba_device *); |
44 | const struct amba_id *id_table; | 44 | const struct amba_id *id_table; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | enum amba_vendor { | 47 | enum amba_vendor { |
48 | AMBA_VENDOR_ARM = 0x41, | 48 | AMBA_VENDOR_ARM = 0x41, |
49 | AMBA_VENDOR_ST = 0x80, | 49 | AMBA_VENDOR_ST = 0x80, |
50 | AMBA_VENDOR_QCOM = 0x51, | ||
50 | }; | 51 | }; |
51 | 52 | ||
52 | extern struct bus_type amba_bustype; | 53 | extern struct bus_type amba_bustype; |
53 | 54 | ||
54 | #define to_amba_device(d) container_of(d, struct amba_device, dev) | 55 | #define to_amba_device(d) container_of(d, struct amba_device, dev) |
55 | 56 | ||
56 | #define amba_get_drvdata(d) dev_get_drvdata(&d->dev) | 57 | #define amba_get_drvdata(d) dev_get_drvdata(&d->dev) |
57 | #define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) | 58 | #define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) |
58 | 59 | ||
59 | int amba_driver_register(struct amba_driver *); | 60 | int amba_driver_register(struct amba_driver *); |
60 | void amba_driver_unregister(struct amba_driver *); | 61 | void amba_driver_unregister(struct amba_driver *); |
61 | struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); | 62 | struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); |
62 | void amba_device_put(struct amba_device *); | 63 | void amba_device_put(struct amba_device *); |
63 | int amba_device_add(struct amba_device *, struct resource *); | 64 | int amba_device_add(struct amba_device *, struct resource *); |
64 | int amba_device_register(struct amba_device *, struct resource *); | 65 | int amba_device_register(struct amba_device *, struct resource *); |
65 | struct amba_device *amba_apb_device_add(struct device *parent, const char *name, | 66 | struct amba_device *amba_apb_device_add(struct device *parent, const char *name, |
66 | resource_size_t base, size_t size, | 67 | resource_size_t base, size_t size, |
67 | int irq1, int irq2, void *pdata, | 68 | int irq1, int irq2, void *pdata, |
68 | unsigned int periphid); | 69 | unsigned int periphid); |
69 | struct amba_device *amba_ahb_device_add(struct device *parent, const char *name, | 70 | struct amba_device *amba_ahb_device_add(struct device *parent, const char *name, |
70 | resource_size_t base, size_t size, | 71 | resource_size_t base, size_t size, |
71 | int irq1, int irq2, void *pdata, | 72 | int irq1, int irq2, void *pdata, |
72 | unsigned int periphid); | 73 | unsigned int periphid); |
73 | struct amba_device * | 74 | struct amba_device * |
74 | amba_apb_device_add_res(struct device *parent, const char *name, | 75 | amba_apb_device_add_res(struct device *parent, const char *name, |
75 | resource_size_t base, size_t size, int irq1, | 76 | resource_size_t base, size_t size, int irq1, |
76 | int irq2, void *pdata, unsigned int periphid, | 77 | int irq2, void *pdata, unsigned int periphid, |
77 | struct resource *resbase); | 78 | struct resource *resbase); |
78 | struct amba_device * | 79 | struct amba_device * |
79 | amba_ahb_device_add_res(struct device *parent, const char *name, | 80 | amba_ahb_device_add_res(struct device *parent, const char *name, |
80 | resource_size_t base, size_t size, int irq1, | 81 | resource_size_t base, size_t size, int irq1, |
81 | int irq2, void *pdata, unsigned int periphid, | 82 | int irq2, void *pdata, unsigned int periphid, |
82 | struct resource *resbase); | 83 | struct resource *resbase); |
83 | void amba_device_unregister(struct amba_device *); | 84 | void amba_device_unregister(struct amba_device *); |
84 | struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); | 85 | struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); |
85 | int amba_request_regions(struct amba_device *, const char *); | 86 | int amba_request_regions(struct amba_device *, const char *); |
86 | void amba_release_regions(struct amba_device *); | 87 | void amba_release_regions(struct amba_device *); |
87 | 88 | ||
88 | #define amba_pclk_enable(d) \ | 89 | #define amba_pclk_enable(d) \ |
89 | (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk)) | 90 | (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk)) |
90 | 91 | ||
91 | #define amba_pclk_disable(d) \ | 92 | #define amba_pclk_disable(d) \ |
92 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) | 93 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) |
93 | 94 | ||
94 | /* Some drivers don't use the struct amba_device */ | 95 | /* Some drivers don't use the struct amba_device */ |
95 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) | 96 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) |
96 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) | 97 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) |
97 | #define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff) | 98 | #define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff) |
98 | #define AMBA_PART_BITS(a) ((a) & 0xfff) | 99 | #define AMBA_PART_BITS(a) ((a) & 0xfff) |
99 | 100 | ||
100 | #define amba_config(d) AMBA_CONFIG_BITS((d)->periphid) | 101 | #define amba_config(d) AMBA_CONFIG_BITS((d)->periphid) |
101 | #define amba_rev(d) AMBA_REV_BITS((d)->periphid) | 102 | #define amba_rev(d) AMBA_REV_BITS((d)->periphid) |
102 | #define amba_manf(d) AMBA_MANF_BITS((d)->periphid) | 103 | #define amba_manf(d) AMBA_MANF_BITS((d)->periphid) |
103 | #define amba_part(d) AMBA_PART_BITS((d)->periphid) | 104 | #define amba_part(d) AMBA_PART_BITS((d)->periphid) |
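
The AMBA_*_BITS macros above slice the 32-bit peripheral ID into configuration [31:24], revision [23:20], designer [19:12] and part number [11:0]; the designer byte is what enum amba_vendor names (ARM's 0x41 and Qualcomm's 0x51 are the ASCII codes for 'A' and 'Q'). A small standalone check with an illustrative periphid shaped like an ARM PL011:

    #include <assert.h>

    #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
    #define AMBA_REV_BITS(a)    (((a) >> 20) & 0x0f)
    #define AMBA_MANF_BITS(a)   (((a) >> 12) & 0xff)
    #define AMBA_PART_BITS(a)   ((a) & 0xfff)

    int main(void)
    {
            unsigned int periphid = 0x00341011; /* value chosen for illustration */

            assert(AMBA_CONFIG_BITS(periphid) == 0x00);
            assert(AMBA_REV_BITS(periphid)  == 0x3);
            assert(AMBA_MANF_BITS(periphid) == 0x41);  /* 'A': AMBA_VENDOR_ARM */
            assert(AMBA_PART_BITS(periphid) == 0x011); /* PL011 part number */
            return 0;
    }
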
104 | 105 | ||
105 | #define __AMBA_DEV(busid, data, mask) \ | 106 | #define __AMBA_DEV(busid, data, mask) \ |
106 | { \ | 107 | { \ |
107 | .coherent_dma_mask = mask, \ | 108 | .coherent_dma_mask = mask, \ |
108 | .init_name = busid, \ | 109 | .init_name = busid, \ |
109 | .platform_data = data, \ | 110 | .platform_data = data, \ |
110 | } | 111 | } |
111 | 112 | ||
112 | /* | 113 | /* |
113 | * APB devices do not themselves have the ability to address memory, | 114 | * APB devices do not themselves have the ability to address memory, |
114 | * so DMA masks should be zero (much like USB peripheral devices.) | 115 | * so DMA masks should be zero (much like USB peripheral devices.) |
115 | * The DMA controller DMA masks should be used instead (much like | 116 | * The DMA controller DMA masks should be used instead (much like |
116 | * USB host controllers in conventional PCs.) | 117 | * USB host controllers in conventional PCs.) |
117 | */ | 118 | */ |
118 | #define AMBA_APB_DEVICE(name, busid, id, base, irqs, data) \ | 119 | #define AMBA_APB_DEVICE(name, busid, id, base, irqs, data) \ |
119 | struct amba_device name##_device = { \ | 120 | struct amba_device name##_device = { \ |
120 | .dev = __AMBA_DEV(busid, data, 0), \ | 121 | .dev = __AMBA_DEV(busid, data, 0), \ |
121 | .res = DEFINE_RES_MEM(base, SZ_4K), \ | 122 | .res = DEFINE_RES_MEM(base, SZ_4K), \ |
122 | .irq = irqs, \ | 123 | .irq = irqs, \ |
123 | .periphid = id, \ | 124 | .periphid = id, \ |
124 | } | 125 | } |
125 | 126 | ||
126 | /* | 127 | /* |
127 | * AHB devices are DMA capable, so set their DMA masks | 128 | * AHB devices are DMA capable, so set their DMA masks |
128 | */ | 129 | */ |
129 | #define AMBA_AHB_DEVICE(name, busid, id, base, irqs, data) \ | 130 | #define AMBA_AHB_DEVICE(name, busid, id, base, irqs, data) \ |
130 | struct amba_device name##_device = { \ | 131 | struct amba_device name##_device = { \ |
131 | .dev = __AMBA_DEV(busid, data, ~0ULL), \ | 132 | .dev = __AMBA_DEV(busid, data, ~0ULL), \ |
132 | .res = DEFINE_RES_MEM(base, SZ_4K), \ | 133 | .res = DEFINE_RES_MEM(base, SZ_4K), \ |
133 | .irq = irqs, \ | 134 | .irq = irqs, \ |
134 | .periphid = id, \ | 135 | .periphid = id, \ |
135 | } | 136 | } |
136 | 137 | ||
137 | /* | 138 | /* |
138 | * module_amba_driver() - Helper macro for drivers that don't do anything | 139 | * module_amba_driver() - Helper macro for drivers that don't do anything |
139 | * special in module init/exit. This eliminates a lot of boilerplate. Each | 140 | * special in module init/exit. This eliminates a lot of boilerplate. Each |
140 | * module may only use this macro once, and calling it replaces module_init() | 141 | * module may only use this macro once, and calling it replaces module_init() |
141 | * and module_exit() | 142 | * and module_exit() |
142 | */ | 143 | */ |
143 | #define module_amba_driver(__amba_drv) \ | 144 | #define module_amba_driver(__amba_drv) \ |
144 | module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) | 145 | module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) |
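
module_amba_driver() expands to the usual module_init()/module_exit() pair via module_driver(). A hedged sketch of a minimal client (driver name, ID values and callbacks are hypothetical, not a real driver):

    #include <linux/amba/bus.h>
    #include <linux/module.h>

    static const struct amba_id foo_ids[] = {
            { .id = 0x00041234, .mask = 0x000fffff },  /* hypothetical part */
            { 0, 0 },
    };
    MODULE_DEVICE_TABLE(amba, foo_ids);

    static int foo_probe(struct amba_device *adev, const struct amba_id *id)
    {
            return amba_request_regions(adev, NULL);   /* claim the MMIO region */
    }

    static int foo_remove(struct amba_device *adev)
    {
            amba_release_regions(adev);
            return 0;
    }

    static struct amba_driver foo_driver = {
            .drv            = { .name = "foo" },
            .id_table       = foo_ids,
            .probe          = foo_probe,
            .remove         = foo_remove,
    };
    module_amba_driver(foo_driver);

    MODULE_LICENSE("GPL");
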
145 | 146 | ||
146 | #endif | 147 | #endif |
147 | 148 |
mentioned in commit 1cec24