Commit 328f5cc30290a92ea3ca62b2a63d2b9ebcb0d334
1 parent: 5dd12af05c
Exists in master and in 7 other branches
ARM: Use struct syscore_ops instead of sysdevs for PM in common code
Convert some of the ARM architecture's common code to using struct syscore_ops objects for power management instead of sysdev classes and sysdevs. This simplifies the code and reduces the kernel's memory footprint. It is also necessary for removing sysdevs from the kernel entirely in the future.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Showing 5 changed files with 58 additions and 94 deletions (inline diff)
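Before the per-file diffs, the shape of the conversion is worth spelling out. Below is a minimal sketch of the pattern this commit applies in each file, written against the real <linux/syscore_ops.h> API but using an invented "foo" driver as a placeholder; it illustrates the idiom, it is not code from the tree.

#include <linux/init.h>
#include <linux/syscore_ops.h>

/* Syscore callbacks run on one CPU with interrupts disabled, so they
 * need no locking and no device-model plumbing. */
static int foo_suspend(void)
{
	/* save hardware state; return 0, or -errno to abort the suspend */
	return 0;
}

static void foo_resume(void)
{
	/* restore hardware state; resume cannot fail, hence no return value */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static int __init foo_init(void)
{
	/* unlike sysdev_class_register()/sysdev_register(), this cannot fail */
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}
device_initcall(foo_init);

This is why each converted file below sheds its error-handling boilerplate: there is no class to register, no per-device object, and no return code to check.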
arch/arm/common/vic.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/common/vic.c | 2 | * linux/arch/arm/common/vic.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999 - 2003 ARM Limited | 4 | * Copyright (C) 1999 - 2003 ARM Limited |
5 | * Copyright (C) 2000 Deep Blue Solutions Ltd | 5 | * Copyright (C) 2000 Deep Blue Solutions Ltd |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation; either version 2 of the License, or | 9 | * the Free Software Foundation; either version 2 of the License, or |
10 | * (at your option) any later version. | 10 | * (at your option) any later version. |
11 | * | 11 | * |
12 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | 16 | * |
17 | * You should have received a copy of the GNU General Public License | 17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/sysdev.h> | 25 | #include <linux/syscore_ops.h> |
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/amba/bus.h> | 27 | #include <linux/amba/bus.h> |
28 | 28 | ||
29 | #include <asm/mach/irq.h> | 29 | #include <asm/mach/irq.h> |
30 | #include <asm/hardware/vic.h> | 30 | #include <asm/hardware/vic.h> |
31 | 31 | ||
32 | #if defined(CONFIG_PM) | 32 | #ifdef CONFIG_PM |
33 | /** | 33 | /** |
34 | * struct vic_device - VIC PM device | 34 | * struct vic_device - VIC PM device |
35 | * @sysdev: The system device which is registered. | ||
36 | * @irq: The IRQ number for the base of the VIC. | 35 | * @irq: The IRQ number for the base of the VIC. |
37 | * @base: The register base for the VIC. | 36 | * @base: The register base for the VIC. |
38 | * @resume_sources: A bitmask of interrupts for resume. | 37 | * @resume_sources: A bitmask of interrupts for resume. |
39 | * @resume_irqs: The IRQs enabled for resume. | 38 | * @resume_irqs: The IRQs enabled for resume. |
40 | * @int_select: Save for VIC_INT_SELECT. | 39 | * @int_select: Save for VIC_INT_SELECT. |
41 | * @int_enable: Save for VIC_INT_ENABLE. | 40 | * @int_enable: Save for VIC_INT_ENABLE. |
42 | * @soft_int: Save for VIC_INT_SOFT. | 41 | * @soft_int: Save for VIC_INT_SOFT. |
43 | * @protect: Save for VIC_PROTECT. | 42 | * @protect: Save for VIC_PROTECT. |
44 | */ | 43 | */ |
45 | struct vic_device { | 44 | struct vic_device { |
46 | struct sys_device sysdev; | ||
47 | |||
48 | void __iomem *base; | 45 | void __iomem *base; |
49 | int irq; | 46 | int irq; |
50 | u32 resume_sources; | 47 | u32 resume_sources; |
51 | u32 resume_irqs; | 48 | u32 resume_irqs; |
52 | u32 int_select; | 49 | u32 int_select; |
53 | u32 int_enable; | 50 | u32 int_enable; |
54 | u32 soft_int; | 51 | u32 soft_int; |
55 | u32 protect; | 52 | u32 protect; |
56 | }; | 53 | }; |
57 | 54 | ||
58 | /* we cannot allocate memory when VICs are initially registered */ | 55 | /* we cannot allocate memory when VICs are initially registered */ |
59 | static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; | 56 | static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; |
60 | 57 | ||
61 | static int vic_id; | 58 | static int vic_id; |
62 | |||
63 | static inline struct vic_device *to_vic(struct sys_device *sys) | ||
64 | { | ||
65 | return container_of(sys, struct vic_device, sysdev); | ||
66 | } | ||
67 | #endif /* CONFIG_PM */ | 59 | #endif /* CONFIG_PM */ |
68 | 60 | ||
69 | /** | 61 | /** |
70 | * vic_init2 - common initialisation code | 62 | * vic_init2 - common initialisation code |
71 | * @base: Base of the VIC. | 63 | * @base: Base of the VIC. |
72 | * | 64 | * |
73 | * Common initialisation code for registration | 65 | * Common initialisation code for registration |
74 | * and resume. | 66 | * and resume. |
75 | */ | 67 | */ |
76 | static void vic_init2(void __iomem *base) | 68 | static void vic_init2(void __iomem *base) |
77 | { | 69 | { |
78 | int i; | 70 | int i; |
79 | 71 | ||
80 | for (i = 0; i < 16; i++) { | 72 | for (i = 0; i < 16; i++) { |
81 | void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4); | 73 | void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4); |
82 | writel(VIC_VECT_CNTL_ENABLE | i, reg); | 74 | writel(VIC_VECT_CNTL_ENABLE | i, reg); |
83 | } | 75 | } |
84 | 76 | ||
85 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); | 77 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); |
86 | } | 78 | } |
87 | 79 | ||
88 | #if defined(CONFIG_PM) | 80 | #ifdef CONFIG_PM |
89 | static int vic_class_resume(struct sys_device *dev) | 81 | static void resume_one_vic(struct vic_device *vic) |
90 | { | 82 | { |
91 | struct vic_device *vic = to_vic(dev); | ||
92 | void __iomem *base = vic->base; | 83 | void __iomem *base = vic->base; |
93 | 84 | ||
94 | printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); | 85 | printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); |
95 | 86 | ||
96 | /* re-initialise static settings */ | 87 | /* re-initialise static settings */ |
97 | vic_init2(base); | 88 | vic_init2(base); |
98 | 89 | ||
99 | writel(vic->int_select, base + VIC_INT_SELECT); | 90 | writel(vic->int_select, base + VIC_INT_SELECT); |
100 | writel(vic->protect, base + VIC_PROTECT); | 91 | writel(vic->protect, base + VIC_PROTECT); |
101 | 92 | ||
102 | /* set the enabled ints and then clear the non-enabled */ | 93 | /* set the enabled ints and then clear the non-enabled */ |
103 | writel(vic->int_enable, base + VIC_INT_ENABLE); | 94 | writel(vic->int_enable, base + VIC_INT_ENABLE); |
104 | writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR); | 95 | writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR); |
105 | 96 | ||
106 | /* and the same for the soft-int register */ | 97 | /* and the same for the soft-int register */ |
107 | 98 | ||
108 | writel(vic->soft_int, base + VIC_INT_SOFT); | 99 | writel(vic->soft_int, base + VIC_INT_SOFT); |
109 | writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); | 100 | writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); |
101 | } | ||
110 | 102 | ||
111 | return 0; | 103 | static void vic_resume(void) |
104 | { | ||
105 | int id; | ||
106 | |||
107 | for (id = vic_id - 1; id >= 0; id--) | ||
108 | resume_one_vic(vic_devices + id); | ||
112 | } | 109 | } |
113 | 110 | ||
114 | static int vic_class_suspend(struct sys_device *dev, pm_message_t state) | 111 | static void suspend_one_vic(struct vic_device *vic) |
115 | { | 112 | { |
116 | struct vic_device *vic = to_vic(dev); | ||
117 | void __iomem *base = vic->base; | 113 | void __iomem *base = vic->base; |
118 | 114 | ||
119 | printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); | 115 | printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); |
120 | 116 | ||
121 | vic->int_select = readl(base + VIC_INT_SELECT); | 117 | vic->int_select = readl(base + VIC_INT_SELECT); |
122 | vic->int_enable = readl(base + VIC_INT_ENABLE); | 118 | vic->int_enable = readl(base + VIC_INT_ENABLE); |
123 | vic->soft_int = readl(base + VIC_INT_SOFT); | 119 | vic->soft_int = readl(base + VIC_INT_SOFT); |
124 | vic->protect = readl(base + VIC_PROTECT); | 120 | vic->protect = readl(base + VIC_PROTECT); |
125 | 121 | ||
126 | /* set the interrupts (if any) that are used for | 122 | /* set the interrupts (if any) that are used for |
127 | * resuming the system */ | 123 | * resuming the system */ |
128 | 124 | ||
129 | writel(vic->resume_irqs, base + VIC_INT_ENABLE); | 125 | writel(vic->resume_irqs, base + VIC_INT_ENABLE); |
130 | writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); | 126 | writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); |
127 | } | ||
131 | 128 | ||
129 | static int vic_suspend(void) | ||
130 | { | ||
131 | int id; | ||
132 | |||
133 | for (id = 0; id < vic_id; id++) | ||
134 | suspend_one_vic(vic_devices + id); | ||
135 | |||
132 | return 0; | 136 | return 0; |
133 | } | 137 | } |
134 | 138 | ||
135 | struct sysdev_class vic_class = { | 139 | struct syscore_ops vic_syscore_ops = { |
136 | .name = "vic", | 140 | .suspend = vic_suspend, |
137 | .suspend = vic_class_suspend, | 141 | .resume = vic_resume, |
138 | .resume = vic_class_resume, | ||
139 | }; | 142 | }; |
140 | 143 | ||
141 | /** | 144 | /** |
142 | * vic_pm_init - initcall to register VIC PM | 145 | * vic_pm_init - initcall to register VIC PM |
143 | * | 146 | * |
144 | * This is called via late_initcall() to register | 147 | * This is called via late_initcall() to register |
145 | * the resources for the VICs due to the early | 148 | * the resources for the VICs due to the early |
146 | * nature of the VIC's registration. | 149 | * nature of the VIC's registration. |
147 | */ | 150 | */ |
148 | static int __init vic_pm_init(void) | 151 | static int __init vic_pm_init(void) |
149 | { | 152 | { |
150 | struct vic_device *dev = vic_devices; | 153 | if (vic_id > 0) |
151 | int err; | 154 | register_syscore_ops(&vic_syscore_ops); |
152 | int id; | ||
153 | |||
154 | if (vic_id == 0) | ||
155 | return 0; | ||
156 | |||
157 | err = sysdev_class_register(&vic_class); | ||
158 | if (err) { | ||
159 | printk(KERN_ERR "%s: cannot register class\n", __func__); | ||
160 | return err; | ||
161 | } | ||
162 | |||
163 | for (id = 0; id < vic_id; id++, dev++) { | ||
164 | dev->sysdev.id = id; | ||
165 | dev->sysdev.cls = &vic_class; | ||
166 | |||
167 | err = sysdev_register(&dev->sysdev); | ||
168 | if (err) { | ||
169 | printk(KERN_ERR "%s: failed to register device\n", | ||
170 | __func__); | ||
171 | return err; | ||
172 | } | ||
173 | } | ||
174 | 155 | ||
175 | return 0; | 156 | return 0; |
176 | } | 157 | } |
177 | late_initcall(vic_pm_init); | 158 | late_initcall(vic_pm_init); |
178 | 159 | ||
179 | /** | 160 | /** |
180 | * vic_pm_register - Register a VIC for later power management control | 161 | * vic_pm_register - Register a VIC for later power management control |
181 | * @base: The base address of the VIC. | 162 | * @base: The base address of the VIC. |
182 | * @irq: The base IRQ for the VIC. | 163 | * @irq: The base IRQ for the VIC. |
183 | * @resume_sources: bitmask of interrupts allowed for resume sources. | 164 | * @resume_sources: bitmask of interrupts allowed for resume sources. |
184 | * | 165 | * |
185 | * Register the VIC with the system device tree so that it can be notified | 166 | * Register the VIC with the system device tree so that it can be notified |
186 | * of suspend and resume requests and ensure that the correct actions are | 167 | * of suspend and resume requests and ensure that the correct actions are |
187 | * taken to re-instate the settings on resume. | 168 | * taken to re-instate the settings on resume. |
188 | */ | 169 | */ |
189 | static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources) | 170 | static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources) |
190 | { | 171 | { |
191 | struct vic_device *v; | 172 | struct vic_device *v; |
192 | 173 | ||
193 | if (vic_id >= ARRAY_SIZE(vic_devices)) | 174 | if (vic_id >= ARRAY_SIZE(vic_devices)) |
194 | printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__); | 175 | printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__); |
195 | else { | 176 | else { |
196 | v = &vic_devices[vic_id]; | 177 | v = &vic_devices[vic_id]; |
197 | v->base = base; | 178 | v->base = base; |
198 | v->resume_sources = resume_sources; | 179 | v->resume_sources = resume_sources; |
199 | v->irq = irq; | 180 | v->irq = irq; |
200 | vic_id++; | 181 | vic_id++; |
201 | } | 182 | } |
202 | } | 183 | } |
203 | #else | 184 | #else |
204 | static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { } | 185 | static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { } |
205 | #endif /* CONFIG_PM */ | 186 | #endif /* CONFIG_PM */ |
206 | 187 | ||
207 | static void vic_ack_irq(struct irq_data *d) | 188 | static void vic_ack_irq(struct irq_data *d) |
208 | { | 189 | { |
209 | void __iomem *base = irq_data_get_irq_chip_data(d); | 190 | void __iomem *base = irq_data_get_irq_chip_data(d); |
210 | unsigned int irq = d->irq & 31; | 191 | unsigned int irq = d->irq & 31; |
211 | writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); | 192 | writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); |
212 | /* moreover, clear the soft-triggered, in case it was the reason */ | 193 | /* moreover, clear the soft-triggered, in case it was the reason */ |
213 | writel(1 << irq, base + VIC_INT_SOFT_CLEAR); | 194 | writel(1 << irq, base + VIC_INT_SOFT_CLEAR); |
214 | } | 195 | } |
215 | 196 | ||
216 | static void vic_mask_irq(struct irq_data *d) | 197 | static void vic_mask_irq(struct irq_data *d) |
217 | { | 198 | { |
218 | void __iomem *base = irq_data_get_irq_chip_data(d); | 199 | void __iomem *base = irq_data_get_irq_chip_data(d); |
219 | unsigned int irq = d->irq & 31; | 200 | unsigned int irq = d->irq & 31; |
220 | writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); | 201 | writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); |
221 | } | 202 | } |
222 | 203 | ||
223 | static void vic_unmask_irq(struct irq_data *d) | 204 | static void vic_unmask_irq(struct irq_data *d) |
224 | { | 205 | { |
225 | void __iomem *base = irq_data_get_irq_chip_data(d); | 206 | void __iomem *base = irq_data_get_irq_chip_data(d); |
226 | unsigned int irq = d->irq & 31; | 207 | unsigned int irq = d->irq & 31; |
227 | writel(1 << irq, base + VIC_INT_ENABLE); | 208 | writel(1 << irq, base + VIC_INT_ENABLE); |
228 | } | 209 | } |
229 | 210 | ||
230 | #if defined(CONFIG_PM) | 211 | #if defined(CONFIG_PM) |
231 | static struct vic_device *vic_from_irq(unsigned int irq) | 212 | static struct vic_device *vic_from_irq(unsigned int irq) |
232 | { | 213 | { |
233 | struct vic_device *v = vic_devices; | 214 | struct vic_device *v = vic_devices; |
234 | unsigned int base_irq = irq & ~31; | 215 | unsigned int base_irq = irq & ~31; |
235 | int id; | 216 | int id; |
236 | 217 | ||
237 | for (id = 0; id < vic_id; id++, v++) { | 218 | for (id = 0; id < vic_id; id++, v++) { |
238 | if (v->irq == base_irq) | 219 | if (v->irq == base_irq) |
239 | return v; | 220 | return v; |
240 | } | 221 | } |
241 | 222 | ||
242 | return NULL; | 223 | return NULL; |
243 | } | 224 | } |
244 | 225 | ||
245 | static int vic_set_wake(struct irq_data *d, unsigned int on) | 226 | static int vic_set_wake(struct irq_data *d, unsigned int on) |
246 | { | 227 | { |
247 | struct vic_device *v = vic_from_irq(d->irq); | 228 | struct vic_device *v = vic_from_irq(d->irq); |
248 | unsigned int off = d->irq & 31; | 229 | unsigned int off = d->irq & 31; |
249 | u32 bit = 1 << off; | 230 | u32 bit = 1 << off; |
250 | 231 | ||
251 | if (!v) | 232 | if (!v) |
252 | return -EINVAL; | 233 | return -EINVAL; |
253 | 234 | ||
254 | if (!(bit & v->resume_sources)) | 235 | if (!(bit & v->resume_sources)) |
255 | return -EINVAL; | 236 | return -EINVAL; |
256 | 237 | ||
257 | if (on) | 238 | if (on) |
258 | v->resume_irqs |= bit; | 239 | v->resume_irqs |= bit; |
259 | else | 240 | else |
260 | v->resume_irqs &= ~bit; | 241 | v->resume_irqs &= ~bit; |
261 | 242 | ||
262 | return 0; | 243 | return 0; |
263 | } | 244 | } |
264 | #else | 245 | #else |
265 | #define vic_set_wake NULL | 246 | #define vic_set_wake NULL |
266 | #endif /* CONFIG_PM */ | 247 | #endif /* CONFIG_PM */ |
267 | 248 | ||
268 | static struct irq_chip vic_chip = { | 249 | static struct irq_chip vic_chip = { |
269 | .name = "VIC", | 250 | .name = "VIC", |
270 | .irq_ack = vic_ack_irq, | 251 | .irq_ack = vic_ack_irq, |
271 | .irq_mask = vic_mask_irq, | 252 | .irq_mask = vic_mask_irq, |
272 | .irq_unmask = vic_unmask_irq, | 253 | .irq_unmask = vic_unmask_irq, |
273 | .irq_set_wake = vic_set_wake, | 254 | .irq_set_wake = vic_set_wake, |
274 | }; | 255 | }; |
275 | 256 | ||
276 | static void __init vic_disable(void __iomem *base) | 257 | static void __init vic_disable(void __iomem *base) |
277 | { | 258 | { |
278 | writel(0, base + VIC_INT_SELECT); | 259 | writel(0, base + VIC_INT_SELECT); |
279 | writel(0, base + VIC_INT_ENABLE); | 260 | writel(0, base + VIC_INT_ENABLE); |
280 | writel(~0, base + VIC_INT_ENABLE_CLEAR); | 261 | writel(~0, base + VIC_INT_ENABLE_CLEAR); |
281 | writel(0, base + VIC_IRQ_STATUS); | 262 | writel(0, base + VIC_IRQ_STATUS); |
282 | writel(0, base + VIC_ITCR); | 263 | writel(0, base + VIC_ITCR); |
283 | writel(~0, base + VIC_INT_SOFT_CLEAR); | 264 | writel(~0, base + VIC_INT_SOFT_CLEAR); |
284 | } | 265 | } |
285 | 266 | ||
286 | static void __init vic_clear_interrupts(void __iomem *base) | 267 | static void __init vic_clear_interrupts(void __iomem *base) |
287 | { | 268 | { |
288 | unsigned int i; | 269 | unsigned int i; |
289 | 270 | ||
290 | writel(0, base + VIC_PL190_VECT_ADDR); | 271 | writel(0, base + VIC_PL190_VECT_ADDR); |
291 | for (i = 0; i < 19; i++) { | 272 | for (i = 0; i < 19; i++) { |
292 | unsigned int value; | 273 | unsigned int value; |
293 | 274 | ||
294 | value = readl(base + VIC_PL190_VECT_ADDR); | 275 | value = readl(base + VIC_PL190_VECT_ADDR); |
295 | writel(value, base + VIC_PL190_VECT_ADDR); | 276 | writel(value, base + VIC_PL190_VECT_ADDR); |
296 | } | 277 | } |
297 | } | 278 | } |
298 | 279 | ||
299 | static void __init vic_set_irq_sources(void __iomem *base, | 280 | static void __init vic_set_irq_sources(void __iomem *base, |
300 | unsigned int irq_start, u32 vic_sources) | 281 | unsigned int irq_start, u32 vic_sources) |
301 | { | 282 | { |
302 | unsigned int i; | 283 | unsigned int i; |
303 | 284 | ||
304 | for (i = 0; i < 32; i++) { | 285 | for (i = 0; i < 32; i++) { |
305 | if (vic_sources & (1 << i)) { | 286 | if (vic_sources & (1 << i)) { |
306 | unsigned int irq = irq_start + i; | 287 | unsigned int irq = irq_start + i; |
307 | 288 | ||
308 | irq_set_chip_and_handler(irq, &vic_chip, | 289 | irq_set_chip_and_handler(irq, &vic_chip, |
309 | handle_level_irq); | 290 | handle_level_irq); |
310 | irq_set_chip_data(irq, base); | 291 | irq_set_chip_data(irq, base); |
311 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 292 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
312 | } | 293 | } |
313 | } | 294 | } |
314 | } | 295 | } |
315 | 296 | ||
316 | /* | 297 | /* |
317 | * The PL190 cell from ARM has been modified by ST to handle 64 interrupts. | 298 | * The PL190 cell from ARM has been modified by ST to handle 64 interrupts. |
318 | * The original cell has 32 interrupts, while the modified one has 64, | 299 | * The original cell has 32 interrupts, while the modified one has 64, |
319 | * replicating the block 0x00..0x1f at 0x20..0x3f. In that case | 300 | * replicating the block 0x00..0x1f at 0x20..0x3f. In that case |
320 | * the probe function is called twice, with base set to offset 000 | 301 | * the probe function is called twice, with base set to offset 000 |
321 | * and 020 within the page. We call this "second block". | 302 | * and 020 within the page. We call this "second block". |
322 | */ | 303 | */ |
323 | static void __init vic_init_st(void __iomem *base, unsigned int irq_start, | 304 | static void __init vic_init_st(void __iomem *base, unsigned int irq_start, |
324 | u32 vic_sources) | 305 | u32 vic_sources) |
325 | { | 306 | { |
326 | unsigned int i; | 307 | unsigned int i; |
327 | int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0; | 308 | int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0; |
328 | 309 | ||
329 | /* Disable all interrupts initially. */ | 310 | /* Disable all interrupts initially. */ |
330 | vic_disable(base); | 311 | vic_disable(base); |
331 | 312 | ||
332 | /* | 313 | /* |
333 | * Make sure we clear all existing interrupts. The vector registers | 314 | * Make sure we clear all existing interrupts. The vector registers |
334 | * in this cell are after the second block of general registers, | 315 | * in this cell are after the second block of general registers, |
335 | * so we can address them using standard offsets, but only from | 316 | * so we can address them using standard offsets, but only from |
336 | * the second base address, which is 0x20 in the page | 317 | * the second base address, which is 0x20 in the page |
337 | */ | 318 | */ |
338 | if (vic_2nd_block) { | 319 | if (vic_2nd_block) { |
339 | vic_clear_interrupts(base); | 320 | vic_clear_interrupts(base); |
340 | 321 | ||
341 | /* ST has 16 vectors as well, but we don't enable them for now */ | 322 | /* ST has 16 vectors as well, but we don't enable them for now */ |
342 | for (i = 0; i < 16; i++) { | 323 | for (i = 0; i < 16; i++) { |
343 | void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4); | 324 | void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4); |
344 | writel(0, reg); | 325 | writel(0, reg); |
345 | } | 326 | } |
346 | 327 | ||
347 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); | 328 | writel(32, base + VIC_PL190_DEF_VECT_ADDR); |
348 | } | 329 | } |
349 | 330 | ||
350 | vic_set_irq_sources(base, irq_start, vic_sources); | 331 | vic_set_irq_sources(base, irq_start, vic_sources); |
351 | } | 332 | } |
352 | 333 | ||
353 | /** | 334 | /** |
354 | * vic_init - initialise a vectored interrupt controller | 335 | * vic_init - initialise a vectored interrupt controller |
355 | * @base: iomem base address | 336 | * @base: iomem base address |
356 | * @irq_start: starting interrupt number, must be a multiple of 32 | 337 | * @irq_start: starting interrupt number, must be a multiple of 32 |
357 | * @vic_sources: bitmask of interrupt sources to allow | 338 | * @vic_sources: bitmask of interrupt sources to allow |
358 | * @resume_sources: bitmask of interrupt sources to allow for resume | 339 | * @resume_sources: bitmask of interrupt sources to allow for resume |
359 | */ | 340 | */ |
360 | void __init vic_init(void __iomem *base, unsigned int irq_start, | 341 | void __init vic_init(void __iomem *base, unsigned int irq_start, |
361 | u32 vic_sources, u32 resume_sources) | 342 | u32 vic_sources, u32 resume_sources) |
362 | { | 343 | { |
363 | unsigned int i; | 344 | unsigned int i; |
364 | u32 cellid = 0; | 345 | u32 cellid = 0; |
365 | enum amba_vendor vendor; | 346 | enum amba_vendor vendor; |
366 | 347 | ||
367 | /* Identify which VIC cell this one is, by reading the ID */ | 348 | /* Identify which VIC cell this one is, by reading the ID */ |
368 | for (i = 0; i < 4; i++) { | 349 | for (i = 0; i < 4; i++) { |
369 | u32 addr = ((u32)base & PAGE_MASK) + 0xfe0 + (i * 4); | 350 | u32 addr = ((u32)base & PAGE_MASK) + 0xfe0 + (i * 4); |
370 | cellid |= (readl(addr) & 0xff) << (8 * i); | 351 | cellid |= (readl(addr) & 0xff) << (8 * i); |
371 | } | 352 | } |
372 | vendor = (cellid >> 12) & 0xff; | 353 | vendor = (cellid >> 12) & 0xff; |
373 | printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n", | 354 | printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n", |
374 | base, cellid, vendor); | 355 | base, cellid, vendor); |
375 | 356 | ||
376 | switch(vendor) { | 357 | switch(vendor) { |
377 | case AMBA_VENDOR_ST: | 358 | case AMBA_VENDOR_ST: |
378 | vic_init_st(base, irq_start, vic_sources); | 359 | vic_init_st(base, irq_start, vic_sources); |
379 | return; | 360 | return; |
380 | default: | 361 | default: |
381 | printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n"); | 362 | printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n"); |
382 | /* fall through */ | 363 | /* fall through */ |
383 | case AMBA_VENDOR_ARM: | 364 | case AMBA_VENDOR_ARM: |
384 | break; | 365 | break; |
385 | } | 366 | } |
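For reference, a board file wires a VIC into the code above through vic_init(); a hypothetical call follows, where FOO_VIC_BASE and the chosen wake source are invented for illustration.

	/* hypothetical machine setup: IRQs 32..63 live on this VIC, all 32
	 * sources enabled, and only bit 4 (say, an RTC) may wake the system */
	vic_init(FOO_VIC_BASE, 32, ~0u, 1 << 4);

Note also that vic_suspend() walks the vic_devices array forwards while vic_resume() walks it backwards, preserving the usual last-suspended, first-resumed ordering.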
arch/arm/include/asm/mach/time.h
1 | /* | 1 | /* |
2 | * arch/arm/include/asm/mach/time.h | 2 | * arch/arm/include/asm/mach/time.h |
3 | * | 3 | * |
4 | * Copyright (C) 2004 MontaVista Software, Inc. | 4 | * Copyright (C) 2004 MontaVista Software, Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #ifndef __ASM_ARM_MACH_TIME_H | 10 | #ifndef __ASM_ARM_MACH_TIME_H |
11 | #define __ASM_ARM_MACH_TIME_H | 11 | #define __ASM_ARM_MACH_TIME_H |
12 | 12 | ||
13 | #include <linux/sysdev.h> | 13 | #include <linux/sysdev.h> |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * This is our kernel timer structure. | 16 | * This is our kernel timer structure. |
17 | * | 17 | * |
18 | * - init | 18 | * - init |
19 | * Initialise the kernel's jiffy timer source, claim interrupt | 19 | * Initialise the kernel's jiffy timer source, claim interrupt |
20 | * using setup_irq. This is called early on during initialisation | 20 | * using setup_irq. This is called early on during initialisation |
21 | * while interrupts are still disabled on the local CPU. | 21 | * while interrupts are still disabled on the local CPU. |
22 | * - suspend | 22 | * - suspend |
23 | * Suspend the kernel jiffy timer source, if necessary. This | 23 | * Suspend the kernel jiffy timer source, if necessary. This |
24 | * is called with interrupts disabled, after all normal devices | 24 | * is called with interrupts disabled, after all normal devices |
25 | * have been suspended. If no action is required, set this to | 25 | * have been suspended. If no action is required, set this to |
26 | * NULL. | 26 | * NULL. |
27 | * - resume | 27 | * - resume |
28 | * Resume the kernel jiffy timer source, if necessary. This | 28 | * Resume the kernel jiffy timer source, if necessary. This |
29 | * is called with interrupts disabled before any normal devices | 29 | * is called with interrupts disabled before any normal devices |
30 | * are resumed. If no action is required, set this to NULL. | 30 | * are resumed. If no action is required, set this to NULL. |
31 | * - offset | 31 | * - offset |
32 | * Return the timer offset in microseconds since the last timer | 32 | * Return the timer offset in microseconds since the last timer |
33 | * interrupt. Note: this must take account of any unprocessed | 33 | * interrupt. Note: this must take account of any unprocessed |
34 | * timer interrupt which may be pending. | 34 | * timer interrupt which may be pending. |
35 | */ | 35 | */ |
36 | struct sys_timer { | 36 | struct sys_timer { |
37 | struct sys_device dev; | ||
38 | void (*init)(void); | 37 | void (*init)(void); |
39 | void (*suspend)(void); | 38 | void (*suspend)(void); |
40 | void (*resume)(void); | 39 | void (*resume)(void); |
41 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | 40 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
42 | unsigned long (*offset)(void); | 41 | unsigned long (*offset)(void); |
43 | #endif | 42 | #endif |
44 | }; | 43 | }; |
45 | 44 | ||
46 | extern void timer_tick(void); | 45 | extern void timer_tick(void); |
47 | 46 | ||
48 | #endif | 47 | #endif |
49 | 48 |
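With the embedded sys_device removed from struct sys_timer, a machine's timer descriptor reduces to plain callbacks. A hypothetical definition following the comment block above (the names are invented; suspend and resume may be left NULL when no action is required):

static void __init foo_timer_init(void)
{
	/* program the tick hardware and claim its interrupt via setup_irq() */
}

struct sys_timer foo_timer = {
	.init		= foo_timer_init,
	.suspend	= NULL,		/* nothing to save on this board */
	.resume		= NULL,
};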
arch/arm/kernel/leds.c
1 | /* | 1 | /* |
2 | * LED support code, ripped out of arch/arm/kernel/time.c | 2 | * LED support code, ripped out of arch/arm/kernel/time.c |
3 | * | 3 | * |
4 | * Copyright (C) 1994-2001 Russell King | 4 | * Copyright (C) 1994-2001 Russell King |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/sysdev.h> | 12 | #include <linux/sysdev.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | 14 | ||
14 | #include <asm/leds.h> | 15 | #include <asm/leds.h> |
15 | 16 | ||
16 | static void dummy_leds_event(led_event_t evt) | 17 | static void dummy_leds_event(led_event_t evt) |
17 | { | 18 | { |
18 | } | 19 | } |
19 | 20 | ||
20 | void (*leds_event)(led_event_t) = dummy_leds_event; | 21 | void (*leds_event)(led_event_t) = dummy_leds_event; |
21 | 22 | ||
22 | struct leds_evt_name { | 23 | struct leds_evt_name { |
23 | const char name[8]; | 24 | const char name[8]; |
24 | int on; | 25 | int on; |
25 | int off; | 26 | int off; |
26 | }; | 27 | }; |
27 | 28 | ||
28 | static const struct leds_evt_name evt_names[] = { | 29 | static const struct leds_evt_name evt_names[] = { |
29 | { "amber", led_amber_on, led_amber_off }, | 30 | { "amber", led_amber_on, led_amber_off }, |
30 | { "blue", led_blue_on, led_blue_off }, | 31 | { "blue", led_blue_on, led_blue_off }, |
31 | { "green", led_green_on, led_green_off }, | 32 | { "green", led_green_on, led_green_off }, |
32 | { "red", led_red_on, led_red_off }, | 33 | { "red", led_red_on, led_red_off }, |
33 | }; | 34 | }; |
34 | 35 | ||
35 | static ssize_t leds_store(struct sys_device *dev, | 36 | static ssize_t leds_store(struct sys_device *dev, |
36 | struct sysdev_attribute *attr, | 37 | struct sysdev_attribute *attr, |
37 | const char *buf, size_t size) | 38 | const char *buf, size_t size) |
38 | { | 39 | { |
39 | int ret = -EINVAL, len = strcspn(buf, " "); | 40 | int ret = -EINVAL, len = strcspn(buf, " "); |
40 | 41 | ||
41 | if (len > 0 && buf[len] == '\0') | 42 | if (len > 0 && buf[len] == '\0') |
42 | len--; | 43 | len--; |
43 | 44 | ||
44 | if (strncmp(buf, "claim", len) == 0) { | 45 | if (strncmp(buf, "claim", len) == 0) { |
45 | leds_event(led_claim); | 46 | leds_event(led_claim); |
46 | ret = size; | 47 | ret = size; |
47 | } else if (strncmp(buf, "release", len) == 0) { | 48 | } else if (strncmp(buf, "release", len) == 0) { |
48 | leds_event(led_release); | 49 | leds_event(led_release); |
49 | ret = size; | 50 | ret = size; |
50 | } else { | 51 | } else { |
51 | int i; | 52 | int i; |
52 | 53 | ||
53 | for (i = 0; i < ARRAY_SIZE(evt_names); i++) { | 54 | for (i = 0; i < ARRAY_SIZE(evt_names); i++) { |
54 | if (strlen(evt_names[i].name) != len || | 55 | if (strlen(evt_names[i].name) != len || |
55 | strncmp(buf, evt_names[i].name, len) != 0) | 56 | strncmp(buf, evt_names[i].name, len) != 0) |
56 | continue; | 57 | continue; |
57 | if (strncmp(buf+len, " on", 3) == 0) { | 58 | if (strncmp(buf+len, " on", 3) == 0) { |
58 | leds_event(evt_names[i].on); | 59 | leds_event(evt_names[i].on); |
59 | ret = size; | 60 | ret = size; |
60 | } else if (strncmp(buf+len, " off", 4) == 0) { | 61 | } else if (strncmp(buf+len, " off", 4) == 0) { |
61 | leds_event(evt_names[i].off); | 62 | leds_event(evt_names[i].off); |
62 | ret = size; | 63 | ret = size; |
63 | } | 64 | } |
64 | break; | 65 | break; |
65 | } | 66 | } |
66 | } | 67 | } |
67 | return ret; | 68 | return ret; |
68 | } | 69 | } |
69 | 70 | ||
70 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); | 71 | static SYSDEV_ATTR(event, 0200, NULL, leds_store); |
71 | 72 | ||
72 | static int leds_suspend(struct sys_device *dev, pm_message_t state) | 73 | static struct sysdev_class leds_sysclass = { |
74 | .name = "leds", | ||
75 | }; | ||
76 | |||
77 | static struct sys_device leds_device = { | ||
78 | .id = 0, | ||
79 | .cls = &leds_sysclass, | ||
80 | }; | ||
81 | |||
82 | static int leds_suspend(void) | ||
73 | { | 83 | { |
74 | leds_event(led_stop); | 84 | leds_event(led_stop); |
75 | return 0; | 85 | return 0; |
76 | } | 86 | } |
77 | 87 | ||
78 | static int leds_resume(struct sys_device *dev) | 88 | static void leds_resume(void) |
79 | { | 89 | { |
80 | leds_event(led_start); | 90 | leds_event(led_start); |
81 | return 0; | ||
82 | } | 91 | } |
83 | 92 | ||
84 | static int leds_shutdown(struct sys_device *dev) | 93 | static void leds_shutdown(void) |
85 | { | 94 | { |
86 | leds_event(led_halted); | 95 | leds_event(led_halted); |
87 | return 0; | ||
88 | } | 96 | } |
89 | 97 | ||
90 | static struct sysdev_class leds_sysclass = { | 98 | static struct syscore_ops leds_syscore_ops = { |
91 | .name = "leds", | ||
92 | .shutdown = leds_shutdown, | 99 | .shutdown = leds_shutdown, |
93 | .suspend = leds_suspend, | 100 | .suspend = leds_suspend, |
94 | .resume = leds_resume, | 101 | .resume = leds_resume, |
95 | }; | 102 | }; |
96 | 103 | ||
97 | static struct sys_device leds_device = { | ||
98 | .id = 0, | ||
99 | .cls = &leds_sysclass, | ||
100 | }; | ||
101 | |||
102 | static int __init leds_init(void) | 104 | static int __init leds_init(void) |
103 | { | 105 | { |
104 | int ret; | 106 | int ret; |
105 | ret = sysdev_class_register(&leds_sysclass); | 107 | ret = sysdev_class_register(&leds_sysclass); |
106 | if (ret == 0) | 108 | if (ret == 0) |
107 | ret = sysdev_register(&leds_device); | 109 | ret = sysdev_register(&leds_device); |
108 | if (ret == 0) | 110 | if (ret == 0) |
109 | ret = sysdev_create_file(&leds_device, &attr_event); | 111 | ret = sysdev_create_file(&leds_device, &attr_event); |
112 | if (ret == 0) |
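The hunk is cut off above; presumably leds_init() goes on to register leds_syscore_ops and return, in line with the pattern used elsewhere in this commit. The following continuation is an assumption, since the excerpt does not show it:

	/* assumed continuation, not visible in the excerpt above */
	if (ret == 0)
		register_syscore_ops(&leds_syscore_ops);
	return ret;
}
device_initcall(leds_init);

Either way, the sysdev class and device survive here only to host the sysfs event attribute, which userspace drives with writes such as "claim" or "green on" (under sysdev conventions, via /sys/devices/system/leds/leds0/event), while suspend/resume/shutdown handling moves to the syscore ops.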
arch/arm/kernel/time.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/kernel/time.c | 2 | * linux/arch/arm/kernel/time.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | 4 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds |
5 | * Modifications for ARM (C) 1994-2001 Russell King | 5 | * Modifications for ARM (C) 1994-2001 Russell King |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This file contains the ARM-specific time handling details: | 11 | * This file contains the ARM-specific time handling details: |
12 | * reading the RTC at bootup, etc... | 12 | * reading the RTC at bootup, etc... |
13 | */ | 13 | */ |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/time.h> | 17 | #include <linux/time.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
21 | #include <linux/timex.h> | 21 | #include <linux/timex.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/profile.h> | 23 | #include <linux/profile.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | 27 | ||
28 | #include <linux/mc146818rtc.h> | 28 | #include <linux/mc146818rtc.h> |
29 | 29 | ||
30 | #include <asm/leds.h> | 30 | #include <asm/leds.h> |
31 | #include <asm/thread_info.h> | 31 | #include <asm/thread_info.h> |
32 | #include <asm/sched_clock.h> | 32 | #include <asm/sched_clock.h> |
33 | #include <asm/stacktrace.h> | 33 | #include <asm/stacktrace.h> |
34 | #include <asm/mach/arch.h> | 34 | #include <asm/mach/arch.h> |
35 | #include <asm/mach/time.h> | 35 | #include <asm/mach/time.h> |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Our system timer. | 38 | * Our system timer. |
39 | */ | 39 | */ |
40 | static struct sys_timer *system_timer; | 40 | static struct sys_timer *system_timer; |
41 | 41 | ||
42 | #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) | 42 | #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) |
43 | /* this needs a better home */ | 43 | /* this needs a better home */ |
44 | DEFINE_SPINLOCK(rtc_lock); | 44 | DEFINE_SPINLOCK(rtc_lock); |
45 | 45 | ||
46 | #ifdef CONFIG_RTC_DRV_CMOS_MODULE | 46 | #ifdef CONFIG_RTC_DRV_CMOS_MODULE |
47 | EXPORT_SYMBOL(rtc_lock); | 47 | EXPORT_SYMBOL(rtc_lock); |
48 | #endif | 48 | #endif |
49 | #endif /* pc-style 'CMOS' RTC support */ | 49 | #endif /* pc-style 'CMOS' RTC support */ |
50 | 50 | ||
51 | /* change this if you have some constant time drift */ | 51 | /* change this if you have some constant time drift */ |
52 | #define USECS_PER_JIFFY (1000000/HZ) | 52 | #define USECS_PER_JIFFY (1000000/HZ) |
53 | 53 | ||
54 | #ifdef CONFIG_SMP | 54 | #ifdef CONFIG_SMP |
55 | unsigned long profile_pc(struct pt_regs *regs) | 55 | unsigned long profile_pc(struct pt_regs *regs) |
56 | { | 56 | { |
57 | struct stackframe frame; | 57 | struct stackframe frame; |
58 | 58 | ||
59 | if (!in_lock_functions(regs->ARM_pc)) | 59 | if (!in_lock_functions(regs->ARM_pc)) |
60 | return regs->ARM_pc; | 60 | return regs->ARM_pc; |
61 | 61 | ||
62 | frame.fp = regs->ARM_fp; | 62 | frame.fp = regs->ARM_fp; |
63 | frame.sp = regs->ARM_sp; | 63 | frame.sp = regs->ARM_sp; |
64 | frame.lr = regs->ARM_lr; | 64 | frame.lr = regs->ARM_lr; |
65 | frame.pc = regs->ARM_pc; | 65 | frame.pc = regs->ARM_pc; |
66 | do { | 66 | do { |
67 | int ret = unwind_frame(&frame); | 67 | int ret = unwind_frame(&frame); |
68 | if (ret < 0) | 68 | if (ret < 0) |
69 | return 0; | 69 | return 0; |
70 | } while (in_lock_functions(frame.pc)); | 70 | } while (in_lock_functions(frame.pc)); |
71 | 71 | ||
72 | return frame.pc; | 72 | return frame.pc; |
73 | } | 73 | } |
74 | EXPORT_SYMBOL(profile_pc); | 74 | EXPORT_SYMBOL(profile_pc); |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | 77 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
78 | u32 arch_gettimeoffset(void) | 78 | u32 arch_gettimeoffset(void) |
79 | { | 79 | { |
80 | if (system_timer->offset != NULL) | 80 | if (system_timer->offset != NULL) |
81 | return system_timer->offset() * 1000; | 81 | return system_timer->offset() * 1000; |
82 | 82 | ||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | #endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ | 85 | #endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ |
86 | 86 | ||
87 | #ifdef CONFIG_LEDS_TIMER | 87 | #ifdef CONFIG_LEDS_TIMER |
88 | static inline void do_leds(void) | 88 | static inline void do_leds(void) |
89 | { | 89 | { |
90 | static unsigned int count = HZ/2; | 90 | static unsigned int count = HZ/2; |
91 | 91 | ||
92 | if (--count == 0) { | 92 | if (--count == 0) { |
93 | count = HZ/2; | 93 | count = HZ/2; |
94 | leds_event(led_timer); | 94 | leds_event(led_timer); |
95 | } | 95 | } |
96 | } | 96 | } |
97 | #else | 97 | #else |
98 | #define do_leds() | 98 | #define do_leds() |
99 | #endif | 99 | #endif |
100 | 100 | ||
101 | 101 | ||
102 | #ifndef CONFIG_GENERIC_CLOCKEVENTS | 102 | #ifndef CONFIG_GENERIC_CLOCKEVENTS |
103 | /* | 103 | /* |
104 | * Kernel system timer support. | 104 | * Kernel system timer support. |
105 | */ | 105 | */ |
106 | void timer_tick(void) | 106 | void timer_tick(void) |
107 | { | 107 | { |
108 | profile_tick(CPU_PROFILING); | 108 | profile_tick(CPU_PROFILING); |
109 | do_leds(); | 109 | do_leds(); |
110 | xtime_update(1); | 110 | xtime_update(1); |
111 | #ifndef CONFIG_SMP | 111 | #ifndef CONFIG_SMP |
112 | update_process_times(user_mode(get_irq_regs())); | 112 | update_process_times(user_mode(get_irq_regs())); |
113 | #endif | 113 | #endif |
114 | } | 114 | } |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) | 117 | #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) |
118 | static int timer_suspend(struct sys_device *dev, pm_message_t state) | 118 | static int timer_suspend(void) |
119 | { | 119 | { |
120 | struct sys_timer *timer = container_of(dev, struct sys_timer, dev); | 120 | if (system_timer->suspend) |
121 | system_timer->suspend(); | ||
121 | 122 | ||
122 | if (timer->suspend != NULL) | ||
123 | timer->suspend(); | ||
124 | |||
125 | return 0; | 123 | return 0; |
126 | } | 124 | } |
127 | 125 | ||
128 | static int timer_resume(struct sys_device *dev) | 126 | static void timer_resume(void) |
129 | { | 127 | { |
130 | struct sys_timer *timer = container_of(dev, struct sys_timer, dev); | 128 | if (system_timer->resume) |
131 | 129 | system_timer->resume(); | |
132 | if (timer->resume != NULL) | ||
133 | timer->resume(); | ||
134 | |||
135 | return 0; | ||
136 | } | 130 | } |
137 | #else | 131 | #else |
138 | #define timer_suspend NULL | 132 | #define timer_suspend NULL |
139 | #define timer_resume NULL | 133 | #define timer_resume NULL |
140 | #endif | 134 | #endif |
141 | 135 | ||
142 | static struct sysdev_class timer_sysclass = { | 136 | static struct syscore_ops timer_syscore_ops = { |
143 | .name = "timer", | ||
144 | .suspend = timer_suspend, | 137 | .suspend = timer_suspend, |
145 | .resume = timer_resume, | 138 | .resume = timer_resume, |
146 | }; | 139 | }; |
147 | 140 | ||
148 | static int __init timer_init_sysfs(void) | 141 | static int __init timer_init_syscore_ops(void) |
149 | { | 142 | { |
150 | int ret = sysdev_class_register(&timer_sysclass); | 143 | register_syscore_ops(&timer_syscore_ops); |
151 | if (ret == 0) { | ||
152 | system_timer->dev.cls = &timer_sysclass; | ||
153 | ret = sysdev_register(&system_timer->dev); | ||
154 | } | ||
155 | 144 | ||
156 | return ret; | 145 | return 0; |
157 | } | 146 | } |
158 | 147 | ||
159 | device_initcall(timer_init_sysfs); | 148 | device_initcall(timer_init_syscore_ops); |
160 | 149 | ||
161 | void __init time_init(void) | 150 | void __init time_init(void) |
162 | { | 151 | { |
163 | system_timer = machine_desc->timer; | 152 | system_timer = machine_desc->timer; |
164 | system_timer->init(); | 153 | system_timer->init(); |
165 | #ifdef CONFIG_HAVE_SCHED_CLOCK | 154 | #ifdef CONFIG_HAVE_SCHED_CLOCK |
166 | sched_clock_postinit(); | 155 | sched_clock_postinit(); |
167 | #endif | 156 | #endif |
168 | } | 157 | } |
169 | 158 |
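One detail worth noting in the #else branch above: timer_suspend and timer_resume are defined as NULL when not needed, and the syscore core checks each hook before calling it, so registering the ops unconditionally is safe. The sketch below paraphrases the suspend walk in drivers/base/syscore.c; it is not the literal source, and syscore_ops_list is that file's internal list head.

/* sketch only: paraphrases the walk in drivers/base/syscore.c */
static int syscore_suspend_sketch(void)
{
	struct syscore_ops *ops;
	int ret = 0;

	/* most recently registered ops suspend first */
	list_for_each_entry_reverse(ops, &syscore_ops_list, node) {
		if (ops->suspend) {	/* NULL hooks are simply skipped */
			ret = ops->suspend();
			if (ret)
				break;	/* the real core also resumes the
					 * ops it had already suspended */
		}
	}
	return ret;
}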
arch/arm/vfp/vfpmodule.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/vfp/vfpmodule.c | 2 | * linux/arch/arm/vfp/vfpmodule.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 ARM Limited. | 4 | * Copyright (C) 2004 ARM Limited. |
5 | * Written by Deep Blue Solutions Limited. | 5 | * Written by Deep Blue Solutions Limited. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/notifier.h> | 15 | #include <linux/notifier.h> |
16 | #include <linux/signal.h> | 16 | #include <linux/signal.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | 20 | ||
21 | #include <asm/cputype.h> | 21 | #include <asm/cputype.h> |
22 | #include <asm/thread_notify.h> | 22 | #include <asm/thread_notify.h> |
23 | #include <asm/vfp.h> | 23 | #include <asm/vfp.h> |
24 | 24 | ||
25 | #include "vfpinstr.h" | 25 | #include "vfpinstr.h" |
26 | #include "vfp.h" | 26 | #include "vfp.h" |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Our undef handlers (in entry.S) | 29 | * Our undef handlers (in entry.S) |
30 | */ | 30 | */ |
31 | void vfp_testing_entry(void); | 31 | void vfp_testing_entry(void); |
32 | void vfp_support_entry(void); | 32 | void vfp_support_entry(void); |
33 | void vfp_null_entry(void); | 33 | void vfp_null_entry(void); |
34 | 34 | ||
35 | void (*vfp_vector)(void) = vfp_null_entry; | 35 | void (*vfp_vector)(void) = vfp_null_entry; |
36 | union vfp_state *last_VFP_context[NR_CPUS]; | 36 | union vfp_state *last_VFP_context[NR_CPUS]; |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Dual-use variable. | 39 | * Dual-use variable. |
40 | * Used in startup: set to non-zero if VFP checks fail | 40 | * Used in startup: set to non-zero if VFP checks fail |
41 | * After startup, holds VFP architecture | 41 | * After startup, holds VFP architecture |
42 | */ | 42 | */ |
43 | unsigned int VFP_arch; | 43 | unsigned int VFP_arch; |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * Per-thread VFP initialization. | 46 | * Per-thread VFP initialization. |
47 | */ | 47 | */ |
48 | static void vfp_thread_flush(struct thread_info *thread) | 48 | static void vfp_thread_flush(struct thread_info *thread) |
49 | { | 49 | { |
50 | union vfp_state *vfp = &thread->vfpstate; | 50 | union vfp_state *vfp = &thread->vfpstate; |
51 | unsigned int cpu; | 51 | unsigned int cpu; |
52 | 52 | ||
53 | memset(vfp, 0, sizeof(union vfp_state)); | 53 | memset(vfp, 0, sizeof(union vfp_state)); |
54 | 54 | ||
55 | vfp->hard.fpexc = FPEXC_EN; | 55 | vfp->hard.fpexc = FPEXC_EN; |
56 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; | 56 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * Disable VFP to ensure we initialize it first. We must ensure | 59 | * Disable VFP to ensure we initialize it first. We must ensure |
60 | * that the modification of last_VFP_context[] and hardware disable | 60 | * that the modification of last_VFP_context[] and hardware disable |
61 | * are done for the same CPU and without preemption. | 61 | * are done for the same CPU and without preemption. |
62 | */ | 62 | */ |
63 | cpu = get_cpu(); | 63 | cpu = get_cpu(); |
64 | if (last_VFP_context[cpu] == vfp) | 64 | if (last_VFP_context[cpu] == vfp) |
65 | last_VFP_context[cpu] = NULL; | 65 | last_VFP_context[cpu] = NULL; |
66 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 66 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
67 | put_cpu(); | 67 | put_cpu(); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void vfp_thread_exit(struct thread_info *thread) | 70 | static void vfp_thread_exit(struct thread_info *thread) |
71 | { | 71 | { |
72 | /* release case: Per-thread VFP cleanup. */ | 72 | /* release case: Per-thread VFP cleanup. */ |
73 | union vfp_state *vfp = &thread->vfpstate; | 73 | union vfp_state *vfp = &thread->vfpstate; |
74 | unsigned int cpu = get_cpu(); | 74 | unsigned int cpu = get_cpu(); |
75 | 75 | ||
76 | if (last_VFP_context[cpu] == vfp) | 76 | if (last_VFP_context[cpu] == vfp) |
77 | last_VFP_context[cpu] = NULL; | 77 | last_VFP_context[cpu] = NULL; |
78 | put_cpu(); | 78 | put_cpu(); |
79 | } | 79 | } |
80 | 80 | ||
81 | static void vfp_thread_copy(struct thread_info *thread) | 81 | static void vfp_thread_copy(struct thread_info *thread) |
82 | { | 82 | { |
83 | struct thread_info *parent = current_thread_info(); | 83 | struct thread_info *parent = current_thread_info(); |
84 | 84 | ||
85 | vfp_sync_hwstate(parent); | 85 | vfp_sync_hwstate(parent); |
86 | thread->vfpstate = parent->vfpstate; | 86 | thread->vfpstate = parent->vfpstate; |
87 | } | 87 | } |
88 | 88 | ||
89 | /* | 89 | /* |
90 | * When this function is called with the following 'cmd's, the following | 90 | * When this function is called with the following 'cmd's, the following |
91 | * is true while this function is being run: | 91 | * is true while this function is being run: |
92 | * THREAD_NOTIFY_SWITCH: | 92 | * THREAD_NOTIFY_SWITCH: |
93 | * - the previously running thread will not be scheduled onto another CPU. | 93 | * - the previously running thread will not be scheduled onto another CPU. |
94 | * - the next thread to be run (v) will not be running on another CPU. | 94 | * - the next thread to be run (v) will not be running on another CPU. |
95 | * - thread->cpu is the local CPU number | 95 | * - thread->cpu is the local CPU number |
96 | * - not preemptible as we're called in the middle of a thread switch | 96 | * - not preemptible as we're called in the middle of a thread switch |
97 | * THREAD_NOTIFY_FLUSH: | 97 | * THREAD_NOTIFY_FLUSH: |
98 | * - the thread (v) will be running on the local CPU, so | 98 | * - the thread (v) will be running on the local CPU, so |
99 | * v === current_thread_info() | 99 | * v === current_thread_info() |
100 | * - thread->cpu is the local CPU number at the time it is accessed, | 100 | * - thread->cpu is the local CPU number at the time it is accessed, |
101 | * but may change at any time. | 101 | * but may change at any time. |
102 | * - we could be preempted if tree preempt rcu is enabled, so | 102 | * - we could be preempted if tree preempt rcu is enabled, so |
103 | * it is unsafe to use thread->cpu. | 103 | * it is unsafe to use thread->cpu. |
104 | * THREAD_NOTIFY_EXIT | 104 | * THREAD_NOTIFY_EXIT |
105 | * - the thread (v) will be running on the local CPU, so | 105 | * - the thread (v) will be running on the local CPU, so |
106 | * v === current_thread_info() | 106 | * v === current_thread_info() |
107 | * - thread->cpu is the local CPU number at the time it is accessed, | 107 | * - thread->cpu is the local CPU number at the time it is accessed, |
108 | * but may change at any time. | 108 | * but may change at any time. |
109 | * - we could be preempted if tree preempt rcu is enabled, so | 109 | * - we could be preempted if tree preempt rcu is enabled, so |
110 | * it is unsafe to use thread->cpu. | 110 | * it is unsafe to use thread->cpu. |
111 | */ | 111 | */ |
112 | static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | 112 | static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) |
113 | { | 113 | { |
114 | struct thread_info *thread = v; | 114 | struct thread_info *thread = v; |
115 | u32 fpexc; | 115 | u32 fpexc; |
116 | #ifdef CONFIG_SMP | 116 | #ifdef CONFIG_SMP |
117 | unsigned int cpu; | 117 | unsigned int cpu; |
118 | #endif | 118 | #endif |
119 | 119 | ||
120 | switch (cmd) { | 120 | switch (cmd) { |
121 | case THREAD_NOTIFY_SWITCH: | 121 | case THREAD_NOTIFY_SWITCH: |
122 | fpexc = fmrx(FPEXC); | 122 | fpexc = fmrx(FPEXC); |
123 | 123 | ||
124 | #ifdef CONFIG_SMP | 124 | #ifdef CONFIG_SMP |
125 | cpu = thread->cpu; | 125 | cpu = thread->cpu; |
126 | 126 | ||
127 | /* | 127 | /* |
128 | * On SMP, if VFP is enabled, save the old state in | 128 | * On SMP, if VFP is enabled, save the old state in |
129 | * case the thread migrates to a different CPU. The | 129 | * case the thread migrates to a different CPU. The |
130 | * restoring is done lazily. | 130 | * restoring is done lazily. |
131 | */ | 131 | */ |
132 | if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { | 132 | if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { |
133 | vfp_save_state(last_VFP_context[cpu], fpexc); | 133 | vfp_save_state(last_VFP_context[cpu], fpexc); |
134 | last_VFP_context[cpu]->hard.cpu = cpu; | 134 | last_VFP_context[cpu]->hard.cpu = cpu; |
135 | } | 135 | } |
136 | /* | 136 | /* |
137 | * Thread migration, just force the reloading of the | 137 | * Thread migration, just force the reloading of the |
138 | * state on the new CPU in case the VFP registers | 138 | * state on the new CPU in case the VFP registers |
139 | * contain stale data. | 139 | * contain stale data. |
140 | */ | 140 | */ |
141 | if (thread->vfpstate.hard.cpu != cpu) | 141 | if (thread->vfpstate.hard.cpu != cpu) |
142 | last_VFP_context[cpu] = NULL; | 142 | last_VFP_context[cpu] = NULL; |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | /* | 145 | /* |
146 | * Always disable VFP so we can lazily save/restore the | 146 | * Always disable VFP so we can lazily save/restore the |
147 | * old state. | 147 | * old state. |
148 | */ | 148 | */ |
149 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | 149 | fmxr(FPEXC, fpexc & ~FPEXC_EN); |
150 | break; | 150 | break; |
151 | 151 | ||
152 | case THREAD_NOTIFY_FLUSH: | 152 | case THREAD_NOTIFY_FLUSH: |
153 | vfp_thread_flush(thread); | 153 | vfp_thread_flush(thread); |
154 | break; | 154 | break; |
155 | 155 | ||
156 | case THREAD_NOTIFY_EXIT: | 156 | case THREAD_NOTIFY_EXIT: |
157 | vfp_thread_exit(thread); | 157 | vfp_thread_exit(thread); |
158 | break; | 158 | break; |
159 | 159 | ||
160 | case THREAD_NOTIFY_COPY: | 160 | case THREAD_NOTIFY_COPY: |
161 | vfp_thread_copy(thread); | 161 | vfp_thread_copy(thread); |
162 | break; | 162 | break; |
163 | } | 163 | } |
164 | 164 | ||
165 | return NOTIFY_DONE; | 165 | return NOTIFY_DONE; |
166 | } | 166 | } |
167 | 167 | ||
168 | static struct notifier_block vfp_notifier_block = { | 168 | static struct notifier_block vfp_notifier_block = { |
169 | .notifier_call = vfp_notifier, | 169 | .notifier_call = vfp_notifier, |
170 | }; | 170 | }; |
171 | 171 | ||
172 | /* | 172 | /* |
173 | * Raise a SIGFPE for the current process. | 173 | * Raise a SIGFPE for the current process. |
174 | * sicode describes the signal being raised. | 174 | * sicode describes the signal being raised. |
175 | */ | 175 | */ |
176 | static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) | 176 | static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) |
177 | { | 177 | { |
178 | siginfo_t info; | 178 | siginfo_t info; |
179 | 179 | ||
180 | memset(&info, 0, sizeof(info)); | 180 | memset(&info, 0, sizeof(info)); |
181 | 181 | ||
182 | info.si_signo = SIGFPE; | 182 | info.si_signo = SIGFPE; |
183 | info.si_code = sicode; | 183 | info.si_code = sicode; |
184 | info.si_addr = (void __user *)(instruction_pointer(regs) - 4); | 184 | info.si_addr = (void __user *)(instruction_pointer(regs) - 4); |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * This is the same as NWFPE, because it's not clear what | 187 | * This is the same as NWFPE, because it's not clear what |
188 | * this is used for | 188 | * this is used for |
189 | */ | 189 | */ |
190 | current->thread.error_code = 0; | 190 | current->thread.error_code = 0; |
191 | current->thread.trap_no = 6; | 191 | current->thread.trap_no = 6; |
192 | 192 | ||
193 | send_sig_info(SIGFPE, &info, current); | 193 | send_sig_info(SIGFPE, &info, current); |
194 | } | 194 | } |
195 | 195 | ||
196 | static void vfp_panic(char *reason, u32 inst) | 196 | static void vfp_panic(char *reason, u32 inst) |
197 | { | 197 | { |
198 | int i; | 198 | int i; |
199 | 199 | ||
200 | printk(KERN_ERR "VFP: Error: %s\n", reason); | 200 | printk(KERN_ERR "VFP: Error: %s\n", reason); |
201 | printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", | 201 | printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", |
202 | fmrx(FPEXC), fmrx(FPSCR), inst); | 202 | fmrx(FPEXC), fmrx(FPSCR), inst); |
203 | for (i = 0; i < 32; i += 2) | 203 | for (i = 0; i < 32; i += 2) |
204 | printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n", | 204 | printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n", |
205 | i, vfp_get_float(i), i+1, vfp_get_float(i+1)); | 205 | i, vfp_get_float(i), i+1, vfp_get_float(i+1)); |
206 | } | 206 | } |
207 | 207 | ||
208 | /* | 208 | /* |
209 | * Process bitmask of exception conditions. | 209 | * Process bitmask of exception conditions. |
210 | */ | 210 | */ |
211 | static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs) | 211 | static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs) |
212 | { | 212 | { |
213 | int si_code = 0; | 213 | int si_code = 0; |
214 | 214 | ||
215 | pr_debug("VFP: raising exceptions %08x\n", exceptions); | 215 | pr_debug("VFP: raising exceptions %08x\n", exceptions); |
216 | 216 | ||
217 | if (exceptions == VFP_EXCEPTION_ERROR) { | 217 | if (exceptions == VFP_EXCEPTION_ERROR) { |
218 | vfp_panic("unhandled bounce", inst); | 218 | vfp_panic("unhandled bounce", inst); |
219 | vfp_raise_sigfpe(0, regs); | 219 | vfp_raise_sigfpe(0, regs); |
220 | return; | 220 | return; |
221 | } | 221 | } |
222 | 222 | ||
223 | /* | 223 | /* |
224 | * If any of the status flags are set, update the FPSCR. | 224 | * If any of the status flags are set, update the FPSCR. |
225 | * Comparison instructions always return at least one of | 225 | * Comparison instructions always return at least one of |
226 | * these flags set. | 226 | * these flags set. |
227 | */ | 227 | */ |
228 | if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) | 228 | if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) |
229 | fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V); | 229 | fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V); |
230 | 230 | ||
231 | fpscr |= exceptions; | 231 | fpscr |= exceptions; |
232 | 232 | ||
233 | fmxr(FPSCR, fpscr); | 233 | fmxr(FPSCR, fpscr); |
234 | 234 | ||
235 | #define RAISE(stat,en,sig) \ | 235 | #define RAISE(stat,en,sig) \ |
236 | if (exceptions & stat && fpscr & en) \ | 236 | if (exceptions & stat && fpscr & en) \ |
237 | si_code = sig; | 237 | si_code = sig; |
238 | 238 | ||
239 | /* | 239 | /* |
240 | * These are arranged in priority order, lowest to highest. | 240 | * These are arranged in priority order, lowest to highest. |
241 | */ | 241 | */ |
242 | RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV); | 242 | RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV); |
243 | RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES); | 243 | RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES); |
244 | RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND); | 244 | RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND); |
245 | RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); | 245 | RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); |
246 | RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV); | 246 | RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV); |
247 | 247 | ||
248 | if (si_code) | 248 | if (si_code) |
249 | vfp_raise_sigfpe(si_code, regs); | 249 | vfp_raise_sigfpe(si_code, regs); |
250 | } | 250 | } |
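Because RAISE() assigns to si_code rather than returning, a later matching line overwrites an earlier one, which is why the list runs lowest to highest priority: the last match wins. A single invocation such as RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV) preprocesses to nothing more than:

	if (exceptions & FPSCR_IOC && fpscr & FPSCR_IOE)
		si_code = FPE_FLTINV;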
251 | 251 | ||
252 | /* | 252 | /* |
253 | * Emulate a VFP instruction. | 253 | * Emulate a VFP instruction. |
254 | */ | 254 | */ |
255 | static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) | 255 | static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) |
256 | { | 256 | { |
257 | u32 exceptions = VFP_EXCEPTION_ERROR; | 257 | u32 exceptions = VFP_EXCEPTION_ERROR; |
258 | 258 | ||
259 | pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); | 259 | pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); |
260 | 260 | ||
261 | if (INST_CPRTDO(inst)) { | 261 | if (INST_CPRTDO(inst)) { |
262 | if (!INST_CPRT(inst)) { | 262 | if (!INST_CPRT(inst)) { |
263 | /* | 263 | /* |
264 | * CPDO | 264 | * CPDO |
265 | */ | 265 | */ |
266 | if (vfp_single(inst)) { | 266 | if (vfp_single(inst)) { |
267 | exceptions = vfp_single_cpdo(inst, fpscr); | 267 | exceptions = vfp_single_cpdo(inst, fpscr); |
268 | } else { | 268 | } else { |
269 | exceptions = vfp_double_cpdo(inst, fpscr); | 269 | exceptions = vfp_double_cpdo(inst, fpscr); |
270 | } | 270 | } |
271 | } else { | 271 | } else { |
272 | /* | 272 | /* |
273 | * A CPRT instruction can not appear in FPINST2, nor | 273 | * A CPRT instruction can not appear in FPINST2, nor |
274 | * can it cause an exception. Therefore, we do not | 274 | * can it cause an exception. Therefore, we do not |
275 | * have to emulate it. | 275 | * have to emulate it. |
276 | */ | 276 | */ |
277 | } | 277 | } |
278 | } else { | 278 | } else { |
279 | /* | 279 | /* |
280 | * A CPDT instruction can not appear in FPINST2, nor can | 280 | * A CPDT instruction can not appear in FPINST2, nor can |
281 | * it cause an exception. Therefore, we do not have to | 281 | * it cause an exception. Therefore, we do not have to |
282 | * emulate it. | 282 | * emulate it. |
283 | */ | 283 | */ |
284 | } | 284 | } |
285 | return exceptions & ~VFP_NAN_FLAG; | 285 | return exceptions & ~VFP_NAN_FLAG; |
286 | } | 286 | } |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * Package up a bounce condition. | 289 | * Package up a bounce condition. |
290 | */ | 290 | */ |
291 | void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) | 291 | void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) |
292 | { | 292 | { |
293 | u32 fpscr, orig_fpscr, fpsid, exceptions; | 293 | u32 fpscr, orig_fpscr, fpsid, exceptions; |
294 | 294 | ||
295 | pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); | 295 | pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); |
296 | 296 | ||
297 | /* | 297 | /* |
298 | * At this point, FPEXC can have the following configuration: | 298 | * At this point, FPEXC can have the following configuration: |
299 | * | 299 | * |
300 | * EX DEX IXE | 300 | * EX DEX IXE |
301 | * 0 1 x - synchronous exception | 301 | * 0 1 x - synchronous exception |
302 | * 1 x 0 - asynchronous exception | 302 | * 1 x 0 - asynchronous exception |
303 | * 1 x 1 - synchronous on VFP subarch 1 and asynchronous on later | 303 | * 1 x 1 - synchronous on VFP subarch 1 and asynchronous on later |
304 | * 0 0 1 - synchronous on VFP9 (non-standard subarch 1 | 304 | * 0 0 1 - synchronous on VFP9 (non-standard subarch 1 |
305 | * implementation), undefined otherwise | 305 | * implementation), undefined otherwise |
306 | * | 306 | * |
307 | * Clear various bits and enable access to the VFP so we can | 307 | * Clear various bits and enable access to the VFP so we can |
308 | * handle the bounce. | 308 | * handle the bounce. |
309 | */ | 309 | */ |
310 | fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)); | 310 | fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)); |
311 | 311 | ||
312 | fpsid = fmrx(FPSID); | 312 | fpsid = fmrx(FPSID); |
313 | orig_fpscr = fpscr = fmrx(FPSCR); | 313 | orig_fpscr = fpscr = fmrx(FPSCR); |
314 | 314 | ||
315 | /* | 315 | /* |
316 | * Check for the special VFP subarch 1 and FPSCR.IXE bit case | 316 | * Check for the special VFP subarch 1 and FPSCR.IXE bit case |
317 | */ | 317 | */ |
318 | if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT) | 318 | if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT) |
319 | && (fpscr & FPSCR_IXE)) { | 319 | && (fpscr & FPSCR_IXE)) { |
320 | /* | 320 | /* |
321 | * Synchronous exception, emulate the trigger instruction | 321 | * Synchronous exception, emulate the trigger instruction |
322 | */ | 322 | */ |
323 | goto emulate; | 323 | goto emulate; |
324 | } | 324 | } |
325 | 325 | ||
326 | if (fpexc & FPEXC_EX) { | 326 | if (fpexc & FPEXC_EX) { |
327 | #ifndef CONFIG_CPU_FEROCEON | 327 | #ifndef CONFIG_CPU_FEROCEON |
328 | /* | 328 | /* |
329 | * Asynchronous exception. The instruction is read from FPINST | 329 | * Asynchronous exception. The instruction is read from FPINST |
330 | * and the interrupted instruction has to be restarted. | 330 | * and the interrupted instruction has to be restarted. |
331 | */ | 331 | */ |
332 | trigger = fmrx(FPINST); | 332 | trigger = fmrx(FPINST); |
333 | regs->ARM_pc -= 4; | 333 | regs->ARM_pc -= 4; |
334 | #endif | 334 | #endif |
335 | } else if (!(fpexc & FPEXC_DEX)) { | 335 | } else if (!(fpexc & FPEXC_DEX)) { |
336 | /* | 336 | /* |
337 | * Illegal combination of bits. It can be caused by an | 337 | * Illegal combination of bits. It can be caused by an |
338 | * unallocated VFP instruction executed with FPSCR.IXE set, | 338 | * unallocated VFP instruction executed with FPSCR.IXE set, |
339 | * other than on VFP subarch 1. | 339 | * other than on VFP subarch 1. |
340 | */ | 340 | */ |
341 | vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); | 341 | vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); |
342 | goto exit; | 342 | goto exit; |
343 | } | 343 | } |
344 | 344 | ||
345 | /* | 345 | /* |
346 | * Modify fpscr to indicate the number of iterations remaining. | 346 | * Modify fpscr to indicate the number of iterations remaining. |
347 | * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates | 347 | * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates |
348 | * whether FPEXC.VECITR or FPSCR.LEN is used. | 348 | * whether FPEXC.VECITR or FPSCR.LEN is used. |
349 | */ | 349 | */ |
350 | if (fpexc & (FPEXC_EX | FPEXC_VV)) { | 350 | if (fpexc & (FPEXC_EX | FPEXC_VV)) { |
351 | u32 len; | 351 | u32 len; |
352 | 352 | ||
353 | len = fpexc + (1 << FPEXC_LENGTH_BIT); | 353 | len = fpexc + (1 << FPEXC_LENGTH_BIT); |
354 | 354 | ||
355 | fpscr &= ~FPSCR_LENGTH_MASK; | 355 | fpscr &= ~FPSCR_LENGTH_MASK; |
356 | fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT); | 356 | fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT); |
357 | } | 357 | } |
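A worked example of the bit arithmetic above, assuming FPEXC_LENGTH_BIT == 8 and FPSCR_LENGTH_BIT == 16 as in this era's <asm/vfp.h> (the field positions are an assumption, not restated in this hunk):

/*
 * fpexc + (1 << 8) increments the 3-bit VECITR field of FPEXC in
 * place; (len & FPEXC_LENGTH_MASK) isolates the updated field; and
 * shifting it left by 16 - 8 = 8 bits deposits it at the FPSCR.LEN
 * position, whose previous contents were masked out just before.
 */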
358 | 358 | ||
359 | /* | 359 | /* |
360 | * Handle the first FP instruction. We used to take note of the | 360 | * Handle the first FP instruction. We used to take note of the |
361 | * FPEXC bounce reason, but this appears to be unreliable. | 361 | * FPEXC bounce reason, but this appears to be unreliable. |
362 | * Emulate the bounced instruction instead. | 362 | * Emulate the bounced instruction instead. |
363 | */ | 363 | */ |
364 | exceptions = vfp_emulate_instruction(trigger, fpscr, regs); | 364 | exceptions = vfp_emulate_instruction(trigger, fpscr, regs); |
365 | if (exceptions) | 365 | if (exceptions) |
366 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); | 366 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); |
367 | 367 | ||
368 | /* | 368 | /* |
369 | * If there isn't a second FP instruction, exit now. Note that | 369 | * If there isn't a second FP instruction, exit now. Note that |
370 | * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. | 370 | * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. |
371 | */ | 371 | */ |
372 | if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) | 372 | if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) |
373 | goto exit; | 373 | goto exit; |
374 | 374 | ||
375 | /* | 375 | /* |
376 | * The barrier() here prevents fpinst2 being read | 376 | * The barrier() here prevents fpinst2 being read |
377 | * before the condition above. | 377 | * before the condition above. |
378 | */ | 378 | */ |
379 | barrier(); | 379 | barrier(); |
380 | trigger = fmrx(FPINST2); | 380 | trigger = fmrx(FPINST2); |
381 | 381 | ||
382 | emulate: | 382 | emulate: |
383 | exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); | 383 | exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); |
384 | if (exceptions) | 384 | if (exceptions) |
385 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); | 385 | vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); |
386 | exit: | 386 | exit: |
387 | preempt_enable(); | 387 | preempt_enable(); |
388 | } | 388 | } |
389 | 389 | ||
390 | static void vfp_enable(void *unused) | 390 | static void vfp_enable(void *unused) |
391 | { | 391 | { |
392 | u32 access = get_copro_access(); | 392 | u32 access = get_copro_access(); |
393 | 393 | ||
394 | /* | 394 | /* |
395 | * Enable full access to VFP (cp10 and cp11) | 395 | * Enable full access to VFP (cp10 and cp11) |
396 | */ | 396 | */ |
397 | set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); | 397 | set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); |
398 | } | 398 | } |
399 | 399 | ||
400 | #ifdef CONFIG_PM | 400 | #ifdef CONFIG_PM |
401 | #include <linux/sysdev.h> | 401 | #include <linux/syscore_ops.h> |
402 | 402 | ||
403 | static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) | 403 | static int vfp_pm_suspend(void) |
404 | { | 404 | { |
405 | struct thread_info *ti = current_thread_info(); | 405 | struct thread_info *ti = current_thread_info(); |
406 | u32 fpexc = fmrx(FPEXC); | 406 | u32 fpexc = fmrx(FPEXC); |
407 | 407 | ||
408 | /* if vfp is on, then save state for resumption */ | 408 | /* if vfp is on, then save state for resumption */ |
409 | if (fpexc & FPEXC_EN) { | 409 | if (fpexc & FPEXC_EN) { |
410 | printk(KERN_DEBUG "%s: saving vfp state\n", __func__); | 410 | printk(KERN_DEBUG "%s: saving vfp state\n", __func__); |
411 | vfp_save_state(&ti->vfpstate, fpexc); | 411 | vfp_save_state(&ti->vfpstate, fpexc); |
412 | 412 | ||
413 | /* disable, just in case */ | 413 | /* disable, just in case */ |
414 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 414 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
415 | } | 415 | } |
416 | 416 | ||
417 | /* clear any information we had about last context state */ | 417 | /* clear any information we had about last context state */ |
418 | memset(last_VFP_context, 0, sizeof(last_VFP_context)); | 418 | memset(last_VFP_context, 0, sizeof(last_VFP_context)); |
419 | 419 | ||
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
423 | static int vfp_pm_resume(struct sys_device *dev) | 423 | static void vfp_pm_resume(void) |
424 | { | 424 | { |
425 | /* ensure we have access to the vfp */ | 425 | /* ensure we have access to the vfp */ |
426 | vfp_enable(NULL); | 426 | vfp_enable(NULL); |
427 | 427 | ||
428 | /* and disable it to ensure the next usage restores the state */ | 428 | /* and disable it to ensure the next usage restores the state */ |
429 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 429 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
430 | |||
431 | return 0; | ||
432 | } | 430 | } |
433 | 431 | ||
434 | static struct sysdev_class vfp_pm_sysclass = { | 432 | static struct syscore_ops vfp_pm_syscore_ops = { |
435 | .name = "vfp", | ||
436 | .suspend = vfp_pm_suspend, | 433 | .suspend = vfp_pm_suspend, |
437 | .resume = vfp_pm_resume, | 434 | .resume = vfp_pm_resume, |
438 | }; | 435 | }; |
439 | 436 | ||
440 | static struct sys_device vfp_pm_sysdev = { | ||
441 | .cls = &vfp_pm_sysclass, | ||
442 | }; | ||
443 | |||
444 | static void vfp_pm_init(void) | 437 | static void vfp_pm_init(void) |
445 | { | 438 | { |
446 | sysdev_class_register(&vfp_pm_sysclass); | 439 | register_syscore_ops(&vfp_pm_syscore_ops); |
447 | sysdev_register(&vfp_pm_sysdev); | ||
448 | } | 440 | } |
449 | |||
450 | 441 | ||
451 | #else | 442 | #else |
452 | static inline void vfp_pm_init(void) { } | 443 | static inline void vfp_pm_init(void) { } |
453 | #endif /* CONFIG_PM */ | 444 | #endif /* CONFIG_PM */ |
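The hunk above is the whole conversion this commit performs in each file: the sysdev class and sysdev object collapse into one statically allocated struct syscore_ops, suspend() drops its struct sys_device and pm_message_t arguments but keeps the int return, resume() becomes void, and registration shrinks to a single register_syscore_ops() call. A minimal generic sketch of the resulting pattern (illustrative foo_* names; the VFP hunk above is the real instance):

#include <linux/syscore_ops.h>

/* Runs late in suspend, on one CPU with interrupts disabled,
 * after all ordinary device suspend callbacks have completed. */
static int foo_suspend(void)
{
	/* save hardware state here */
	return 0;
}

/* Runs early in resume, before device resume callbacks. */
static void foo_resume(void)
{
	/* restore hardware state here */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static int __init foo_init(void)
{
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}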
454 | 445 | ||
455 | void vfp_sync_hwstate(struct thread_info *thread) | 446 | void vfp_sync_hwstate(struct thread_info *thread) |
456 | { | 447 | { |
457 | unsigned int cpu = get_cpu(); | 448 | unsigned int cpu = get_cpu(); |
458 | 449 | ||
459 | /* | 450 | /* |
460 | * If the thread we're interested in is the current owner of the | 451 | * If the thread we're interested in is the current owner of the |
461 | * hardware VFP state, then we need to save its state. | 452 | * hardware VFP state, then we need to save its state. |
462 | */ | 453 | */ |
463 | if (last_VFP_context[cpu] == &thread->vfpstate) { | 454 | if (last_VFP_context[cpu] == &thread->vfpstate) { |
464 | u32 fpexc = fmrx(FPEXC); | 455 | u32 fpexc = fmrx(FPEXC); |
465 | 456 | ||
466 | /* | 457 | /* |
467 | * Save the last VFP state on this CPU. | 458 | * Save the last VFP state on this CPU. |
468 | */ | 459 | */ |
469 | fmxr(FPEXC, fpexc | FPEXC_EN); | 460 | fmxr(FPEXC, fpexc | FPEXC_EN); |
470 | vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); | 461 | vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); |
471 | fmxr(FPEXC, fpexc); | 462 | fmxr(FPEXC, fpexc); |
472 | } | 463 | } |
473 | 464 | ||
474 | put_cpu(); | 465 | put_cpu(); |
475 | } | 466 | } |
476 | 467 | ||
477 | void vfp_flush_hwstate(struct thread_info *thread) | 468 | void vfp_flush_hwstate(struct thread_info *thread) |
478 | { | 469 | { |
479 | unsigned int cpu = get_cpu(); | 470 | unsigned int cpu = get_cpu(); |
480 | 471 | ||
481 | /* | 472 | /* |
482 | * If the thread we're interested in is the current owner of the | 473 | * If the thread we're interested in is the current owner of the |
483 | * hardware VFP state, then we need to save its state. | 474 | * hardware VFP state, then we need to save its state. |
484 | */ | 475 | */ |
485 | if (last_VFP_context[cpu] == &thread->vfpstate) { | 476 | if (last_VFP_context[cpu] == &thread->vfpstate) { |
486 | u32 fpexc = fmrx(FPEXC); | 477 | u32 fpexc = fmrx(FPEXC); |
487 | 478 | ||
488 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | 479 | fmxr(FPEXC, fpexc & ~FPEXC_EN); |
489 | 480 | ||
490 | /* | 481 | /* |
491 | * Set the context to NULL to force a reload the next time | 482 | * Set the context to NULL to force a reload the next time |
492 | * the thread uses the VFP. | 483 | * the thread uses the VFP. |
493 | */ | 484 | */ |
494 | last_VFP_context[cpu] = NULL; | 485 | last_VFP_context[cpu] = NULL; |
495 | } | 486 | } |
496 | 487 | ||
497 | #ifdef CONFIG_SMP | 488 | #ifdef CONFIG_SMP |
498 | /* | 489 | /* |
499 | * For SMP we still have to take care of the case where the thread | 490 | * For SMP we still have to take care of the case where the thread |
500 | * migrates to another CPU and then back to the original CPU on which | 491 | * migrates to another CPU and then back to the original CPU on which |
501 | * the last VFP user is still the same thread. Mark the thread VFP | 492 | * the last VFP user is still the same thread. Mark the thread VFP |
502 | * state as belonging to a non-existent CPU so that the saved one will | 493 | * state as belonging to a non-existent CPU so that the saved one will |
503 | * be reloaded in the above case. | 494 | * be reloaded in the above case. |
504 | */ | 495 | */ |
505 | thread->vfpstate.hard.cpu = NR_CPUS; | 496 | thread->vfpstate.hard.cpu = NR_CPUS; |
506 | #endif | 497 | #endif |
507 | put_cpu(); | 498 | put_cpu(); |
508 | } | 499 | } |
509 | 500 | ||
510 | /* | 501 | /* |
511 | * VFP hardware can lose all context when a CPU goes offline. | 502 | * VFP hardware can lose all context when a CPU goes offline. |
512 | * As we will be running in SMP mode with CPU hotplug, we will save the | 503 | * As we will be running in SMP mode with CPU hotplug, we will save the |
513 | * hardware state at every thread switch. We clear our held state when | 504 | * hardware state at every thread switch. We clear our held state when |
514 | * a CPU has been killed, indicating that the VFP hardware doesn't contain | 505 | * a CPU has been killed, indicating that the VFP hardware doesn't contain |
515 | * a thread's VFP state. When a CPU starts up, we re-enable access to the | 506 | * a thread's VFP state. When a CPU starts up, we re-enable access to the |
516 | * VFP hardware. | 507 | * VFP hardware. |
517 | * | 508 | * |
518 | * Both CPU_DYING and CPU_STARTING are called on the CPU which | 509 | * Both CPU_DYING and CPU_STARTING are called on the CPU which |
519 | * is being offlined/onlined. | 510 | * is being offlined/onlined. |
520 | */ | 511 | */ |
521 | static int vfp_hotplug(struct notifier_block *b, unsigned long action, | 512 | static int vfp_hotplug(struct notifier_block *b, unsigned long action, |
522 | void *hcpu) | 513 | void *hcpu) |
523 | { | 514 | { |
524 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { | 515 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { |
525 | unsigned int cpu = (long)hcpu; | 516 | unsigned int cpu = (long)hcpu; |
526 | last_VFP_context[cpu] = NULL; | 517 | last_VFP_context[cpu] = NULL; |
527 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 518 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
528 | vfp_enable(NULL); | 519 | vfp_enable(NULL); |
529 | return NOTIFY_OK; | 520 | return NOTIFY_OK; |
530 | } | 521 | } |
531 | 522 | ||
532 | /* | 523 | /* |
533 | * VFP support code initialisation. | 524 | * VFP support code initialisation. |
534 | */ | 525 | */ |
535 | static int __init vfp_init(void) | 526 | static int __init vfp_init(void) |
536 | { | 527 | { |
537 | unsigned int vfpsid; | 528 | unsigned int vfpsid; |
538 | unsigned int cpu_arch = cpu_architecture(); | 529 | unsigned int cpu_arch = cpu_architecture(); |
539 | 530 | ||
540 | if (cpu_arch >= CPU_ARCH_ARMv6) | 531 | if (cpu_arch >= CPU_ARCH_ARMv6) |
541 | vfp_enable(NULL); | 532 | vfp_enable(NULL); |
542 | 533 | ||
543 | /* | 534 | /* |
544 | * First check that there is a VFP that we can use. | 535 | * First check that there is a VFP that we can use. |
545 | * The handler is already setup to just log calls, so | 536 | * The handler is already setup to just log calls, so |
546 | * we just need to read the VFPSID register. | 537 | * we just need to read the VFPSID register. |
547 | */ | 538 | */ |
548 | vfp_vector = vfp_testing_entry; | 539 | vfp_vector = vfp_testing_entry; |
549 | barrier(); | 540 | barrier(); |
550 | vfpsid = fmrx(FPSID); | 541 | vfpsid = fmrx(FPSID); |
551 | barrier(); | 542 | barrier(); |
552 | vfp_vector = vfp_null_entry; | 543 | vfp_vector = vfp_null_entry; |
553 | 544 | ||
554 | printk(KERN_INFO "VFP support v0.3: "); | 545 | printk(KERN_INFO "VFP support v0.3: "); |
555 | if (VFP_arch) | 546 | if (VFP_arch) |
556 | printk("not present\n"); | 547 | printk("not present\n"); |
557 | else if (vfpsid & FPSID_NODOUBLE) { | 548 | else if (vfpsid & FPSID_NODOUBLE) { |
558 | printk("no double precision support\n"); | 549 | printk("no double precision support\n"); |
559 | } else { | 550 | } else { |
560 | hotcpu_notifier(vfp_hotplug, 0); | 551 | hotcpu_notifier(vfp_hotplug, 0); |
561 | 552 | ||
562 | smp_call_function(vfp_enable, NULL, 1); | 553 | smp_call_function(vfp_enable, NULL, 1); |
563 | 554 | ||
564 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ | 555 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ |
565 | printk("implementor %02x architecture %d part %02x variant %x rev %x\n", | 556 | printk("implementor %02x architecture %d part %02x variant %x rev %x\n", |
566 | (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, | 557 | (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, |
567 | (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, | 558 | (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, |
568 | (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, | 559 | (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, |
569 | (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, | 560 | (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, |
570 | (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); | 561 | (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); |
571 | 562 | ||
572 | vfp_vector = vfp_support_entry; | 563 | vfp_vector = vfp_support_entry; |
573 | 564 | ||
574 | thread_register_notifier(&vfp_notifier_block); | 565 | thread_register_notifier(&vfp_notifier_block); |
575 | vfp_pm_init(); | 566 | vfp_pm_init(); |
576 | 567 | ||
577 | /* | 568 | /* |
578 | * We detected VFP, and the support code is | 569 | * We detected VFP, and the support code is |
579 | * in place; report VFP support to userspace. | 570 | * in place; report VFP support to userspace. |
580 | */ | 571 | */ |
581 | elf_hwcap |= HWCAP_VFP; | 572 | elf_hwcap |= HWCAP_VFP; |
582 | #ifdef CONFIG_VFPv3 | 573 | #ifdef CONFIG_VFPv3 |
583 | if (VFP_arch >= 2) { | 574 | if (VFP_arch >= 2) { |
584 | elf_hwcap |= HWCAP_VFPv3; | 575 | elf_hwcap |= HWCAP_VFPv3; |
585 | 576 | ||
586 | /* | 577 | /* |
587 | * Check for VFPv3 D16. CPUs in this configuration | 578 | * Check for VFPv3 D16. CPUs in this configuration |
588 | * only have 16 x 64-bit registers. | 579 | * only have 16 x 64-bit registers. |
589 | */ | 580 | */ |
590 | if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) | 581 | if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) |
591 | elf_hwcap |= HWCAP_VFPv3D16; | 582 | elf_hwcap |= HWCAP_VFPv3D16; |
592 | } | 583 | } |
593 | #endif | 584 | #endif |
594 | #ifdef CONFIG_NEON | 585 | #ifdef CONFIG_NEON |
595 | /* | 586 | /* |
596 | * Check for the presence of the Advanced SIMD | 587 | * Check for the presence of the Advanced SIMD |
597 | * load/store instructions, integer and single | 588 | * load/store instructions, integer and single |
598 | * precision floating point operations. Only check | 589 | * precision floating point operations. Only check |
599 | * for NEON if the hardware has the MVFR registers. | 590 | * for NEON if the hardware has the MVFR registers. |
600 | */ | 591 | */ |
601 | if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { | 592 | if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { |
602 | if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) | 593 | if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) |
603 | elf_hwcap |= HWCAP_NEON; | 594 | elf_hwcap |= HWCAP_NEON; |
604 | } | 595 | } |
605 | #endif | 596 | #endif |
606 | } | 597 | } |
607 | return 0; | 598 | return 0; |
608 | } | 599 | } |
609 | 600 | ||
610 | late_initcall(vfp_init); | 601 | late_initcall(vfp_init); |
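For orientation, a hedged sketch of where the syscore callbacks registered by vfp_pm_init() fire during a suspend/resume cycle (simplified from this era's kernel/power code; the exact call sequence is a reading of that code, not quoted source):

/*
 * dpm_suspend_start()         - ordinary device suspend callbacks
 * dpm_suspend_noirq()         - late/noirq device callbacks
 * disable_nonboot_cpus()      - down to one CPU
 * arch_suspend_disable_irqs()
 * syscore_suspend()           - vfp_pm_suspend() runs here
 *   ... platform enters the sleep state ...
 * syscore_resume()            - vfp_pm_resume() runs here
 * arch_suspend_enable_irqs()
 * enable_nonboot_cpus()
 * dpm_resume_end()            - device resume callbacks
 */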
611 | 602 |